"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )
    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_preds, action_pred, return_preds = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
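# Hedged usage sketch (added for illustration, not part of the original test file): the slow test
# above exercises CodeGenTokenizer's truncate_before_pattern argument, which cuts the decoded text
# at the first match of any of the given regexes. The sample string below is an assumption; the
# checkpoint id is the one already used in the test.
codegen_tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
generated_ids = codegen_tokenizer.encode("def add(a, b):\n    return a + b\n\n\n\n# trailing noise")
print(codegen_tokenizer.decode(generated_ids, truncate_before_pattern=["\n\n\n"]))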
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
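# Hedged usage sketch (added for illustration, not part of the original __init__): once the lazy
# module is registered in sys.modules, the classes listed in _import_structure can be imported from
# the top-level transformers package and are only materialized on first attribute access. The
# checkpoint id "BAAI/AltCLIP" is an assumption about the published weights.
from transformers import AltCLIPModel, AltCLIPProcessor

altclip_model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
altclip_processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")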
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
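# Hedged usage sketch (added for illustration): compute_ap expects a dict-of-lists adjacency list
# keyed by vertices 0..n-1 and prints every articulation point it finds. The helper below, which
# builds that structure from an undirected edge list, is an assumption and not part of the
# original snippet.
def adjacency_from_edges(num_vertices, edges):
    graph = {vertex: [] for vertex in range(num_vertices)}
    for u, v in edges:
        graph[u].append(v)
        graph[v].append(u)
    return graph


# A simple path 0-1-2 has exactly one articulation point: vertex 1.
compute_ap(adjacency_from_edges(3, [(0, 1), (1, 2)]))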
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
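# Hedged sanity check (added for illustration): the first perfect numbers are 6, 28, 496 and 8128,
# so scanning with the function above should recover exactly those values below 10_000.
assert [n for n in range(2, 10_000) if perfect(n)] == [6, 28, 496, 8128]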
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_a : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model,
        segmentation_processor,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
    ):
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
_lowerCAmelCase : Tuple = (
F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase : List[str] = dict(scheduler.config )
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Optional[int] = FrozenDict(_SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
_lowerCAmelCase : int = (
F"The configuration file of this scheduler: {scheduler} has not set the configuration"
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , _SCREAMING_SNAKE_CASE , standard_warn=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase : Tuple = dict(scheduler.config )
_lowerCAmelCase : str = True
_lowerCAmelCase : str = FrozenDict(_SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=_SCREAMING_SNAKE_CASE , segmentation_processor=_SCREAMING_SNAKE_CASE , vae=_SCREAMING_SNAKE_CASE , text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , )
def __A ( self , a__ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_SCREAMING_SNAKE_CASE )
def __A ( self ):
self.enable_attention_slicing(_SCREAMING_SNAKE_CASE )
def __A ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : Dict = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        text,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
_lowerCAmelCase : Any = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
_lowerCAmelCase : str = self.segmentation_model(**_SCREAMING_SNAKE_CASE )
_lowerCAmelCase : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_lowerCAmelCase : Any = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_lowerCAmelCase : Optional[int] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , width=_SCREAMING_SNAKE_CASE , num_inference_steps=_SCREAMING_SNAKE_CASE , guidance_scale=_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , output_type=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=_SCREAMING_SNAKE_CASE , )
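# Hedged usage sketch (added for illustration, not part of the original pipeline file): loading the
# community text-guided inpainting pipeline through DiffusionPipeline. The custom_pipeline name
# "text_inpainting", the CLIPSeg checkpoint and the inpainting checkpoint are assumptions about
# published weights and may need to be adapted.
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

segmentation_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
text_inpainting_pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=segmentation_model,
    segmentation_processor=segmentation_processor,
)
# result = text_inpainting_pipe(prompt="a red sofa", image=init_image, text="the couch").images[0]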
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
    populate_output(root, level)
return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
    populate_output(root, level)
return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_a : Optional[Any] = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1_000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_a : Optional[int] = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1_000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_a : List[str] = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
_a : Union[str, Any] = {
'num_train_timesteps': 40,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
_a : Optional[int] = {
'num_train_timesteps': 201,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
_a : Any = {
'num_train_timesteps': 151,
'sigma_min': 0.0_02,
'sigma_max': 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Any ,_lowerCamelCase : Union[str, Any]=False ) -> Union[str, Any]:
_lowerCAmelCase : Optional[int] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
_lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.in_layers.0.bias"]
_lowerCAmelCase : Optional[int] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
_lowerCAmelCase : List[Any] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
_lowerCAmelCase : Union[str, Any] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
_lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
_lowerCAmelCase : Any = checkpoint[f"{old_prefix}.out_layers.0.weight"]
_lowerCAmelCase : List[str] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
_lowerCAmelCase : Optional[int] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
_lowerCAmelCase : Optional[int] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
_lowerCAmelCase : Union[str, Any] = checkpoint[f"{old_prefix}.skip_connection.weight"]
_lowerCAmelCase : List[str] = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : str ,_lowerCamelCase : Tuple=None ) -> Any:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 ,dim=0 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 ,dim=0 )
_lowerCAmelCase : Union[str, Any] = checkpoint[f"{old_prefix}.norm.weight"]
_lowerCAmelCase : List[str] = checkpoint[f"{old_prefix}.norm.bias"]
_lowerCAmelCase : Any = weight_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase : List[str] = bias_q.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase : List[str] = bias_k.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase : Any = weight_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase : str = bias_v.squeeze(-1 ).squeeze(-1 )
_lowerCAmelCase : str = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
_lowerCAmelCase : Tuple = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[str] ) -> str:
_lowerCAmelCase : int = torch.load(A_ ,map_location="""cpu""" )
_lowerCAmelCase : Union[str, Any] = {}
_lowerCAmelCase : Tuple = checkpoint["""time_embed.0.weight"""]
_lowerCAmelCase : List[Any] = checkpoint["""time_embed.0.bias"""]
_lowerCAmelCase : Optional[int] = checkpoint["""time_embed.2.weight"""]
_lowerCAmelCase : List[Any] = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
_lowerCAmelCase : Union[str, Any] = checkpoint["""label_emb.weight"""]
_lowerCAmelCase : Optional[Any] = checkpoint["""input_blocks.0.0.weight"""]
_lowerCAmelCase : Dict = checkpoint["""input_blocks.0.0.bias"""]
_lowerCAmelCase : List[Any] = unet_config["""down_block_types"""]
_lowerCAmelCase : Any = unet_config["""layers_per_block"""]
_lowerCAmelCase : Union[str, Any] = unet_config["""attention_head_dim"""]
_lowerCAmelCase : Optional[Any] = unet_config["""block_out_channels"""]
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : Optional[int] = channels_list[0]
for i, layer_type in enumerate(A_ ):
_lowerCAmelCase : List[Any] = channels_list[i]
_lowerCAmelCase : Union[str, Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(A_ ):
_lowerCAmelCase : Any = f"down_blocks.{i}.resnets.{j}"
_lowerCAmelCase : Optional[Any] = f"input_blocks.{current_layer}.0"
_lowerCAmelCase : List[Any] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase : Union[str, Any] = convert_resnet(A_ ,A_ ,A_ ,A_ ,has_skip=A_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(A_ ):
_lowerCAmelCase : Union[str, Any] = f"down_blocks.{i}.resnets.{j}"
_lowerCAmelCase : str = f"input_blocks.{current_layer}.0"
_lowerCAmelCase : Optional[Any] = True if j == 0 and downsample_block_has_skip else False
_lowerCAmelCase : Any = convert_resnet(A_ ,A_ ,A_ ,A_ ,has_skip=A_ )
_lowerCAmelCase : List[str] = f"down_blocks.{i}.attentions.{j}"
_lowerCAmelCase : List[str] = f"input_blocks.{current_layer}.1"
_lowerCAmelCase : int = convert_attention(
A_ ,A_ ,A_ ,A_ ,A_ )
current_layer += 1
if i != len(A_ ) - 1:
_lowerCAmelCase : List[str] = f"down_blocks.{i}.downsamplers.0"
_lowerCAmelCase : List[str] = f"input_blocks.{current_layer}.0"
_lowerCAmelCase : List[Any] = convert_resnet(A_ ,A_ ,A_ ,A_ )
current_layer += 1
_lowerCAmelCase : Tuple = current_channels
# hardcoded the mid-block for now
_lowerCAmelCase : Tuple = """mid_block.resnets.0"""
_lowerCAmelCase : Optional[int] = """middle_block.0"""
_lowerCAmelCase : List[str] = convert_resnet(A_ ,A_ ,A_ ,A_ )
_lowerCAmelCase : int = """mid_block.attentions.0"""
_lowerCAmelCase : List[Any] = """middle_block.1"""
_lowerCAmelCase : int = convert_attention(A_ ,A_ ,A_ ,A_ ,A_ )
_lowerCAmelCase : Dict = """mid_block.resnets.1"""
_lowerCAmelCase : str = """middle_block.2"""
_lowerCAmelCase : Optional[int] = convert_resnet(A_ ,A_ ,A_ ,A_ )
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Dict = unet_config["""up_block_types"""]
for i, layer_type in enumerate(A_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase : Optional[Any] = f"up_blocks.{i}.resnets.{j}"
_lowerCAmelCase : Union[str, Any] = f"output_blocks.{current_layer}.0"
_lowerCAmelCase : str = convert_resnet(A_ ,A_ ,A_ ,A_ ,has_skip=A_ )
current_layer += 1
if i != len(A_ ) - 1:
_lowerCAmelCase : Optional[int] = f"up_blocks.{i}.upsamplers.0"
_lowerCAmelCase : str = f"output_blocks.{current_layer-1}.1"
_lowerCAmelCase : Any = convert_resnet(A_ ,A_ ,A_ ,A_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
_lowerCAmelCase : int = f"up_blocks.{i}.resnets.{j}"
_lowerCAmelCase : str = f"output_blocks.{current_layer}.0"
_lowerCAmelCase : Tuple = convert_resnet(A_ ,A_ ,A_ ,A_ ,has_skip=A_ )
_lowerCAmelCase : Optional[int] = f"up_blocks.{i}.attentions.{j}"
_lowerCAmelCase : Optional[Any] = f"output_blocks.{current_layer}.1"
_lowerCAmelCase : List[str] = convert_attention(
A_ ,A_ ,A_ ,A_ ,A_ )
current_layer += 1
if i != len(A_ ) - 1:
_lowerCAmelCase : List[str] = f"up_blocks.{i}.upsamplers.0"
_lowerCAmelCase : str = f"output_blocks.{current_layer-1}.2"
_lowerCAmelCase : Any = convert_resnet(A_ ,A_ ,A_ ,A_ )
_lowerCAmelCase : List[str] = checkpoint["""out.0.weight"""]
_lowerCAmelCase : str = checkpoint["""out.0.bias"""]
_lowerCAmelCase : Union[str, Any] = checkpoint["""out.2.weight"""]
_lowerCAmelCase : int = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
_a : int = parser.parse_args()
_a : int = strabool(args.class_cond)
_a : Tuple = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
_a : int = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_a : Any = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_a : int = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
_a : str = None
_a : Union[str, Any] = con_pt_to_diffuser(args.unet_path, unet_config)
_a : Optional[Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_a : str = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_a : List[str] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_a : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
_a : Union[str, Any] = CMStochasticIterativeScheduler(**scheduler_config)
_a : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
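# Hedged usage sketch (added for illustration, not part of the original test): the test above only
# checks that a prepared optimizer stays picklable. In a training script the same Accelerator
# typically wraps model, optimizer and dataloader together and drives the backward pass; the random
# dataset below is a placeholder assumption.
from torch.utils.data import DataLoader, TensorDataset


def tiny_training_loop():
    accelerator = Accelerator()
    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    dataset = TensorDataset(torch.randn(32, 10), torch.randn(32, 1))
    dataloader = DataLoader(dataset, batch_size=8)

    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()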
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a : Optional[Any] = logging.get_logger(__name__)
_a : Optional[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_a : Union[str, Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : str ,_lowerCamelCase : Tuple ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : str ) -> Tuple:
for attribute in key.split(""".""" ):
_lowerCAmelCase : Dict = getattr(_lowerCamelCase ,_lowerCamelCase )
if weight_type is not None:
_lowerCAmelCase : Any = getattr(_lowerCamelCase ,_lowerCamelCase ).shape
else:
_lowerCAmelCase : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
_lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase : Dict = value
elif weight_type == "weight_v":
_lowerCAmelCase : Optional[int] = value
elif weight_type == "bias":
_lowerCAmelCase : int = value
elif weight_type == "running_mean":
_lowerCAmelCase : Union[str, Any] = value
elif weight_type == "running_var":
_lowerCAmelCase : Union[str, Any] = value
elif weight_type == "num_batches_tracked":
_lowerCAmelCase : Any = value
elif weight_type == "inv_freq":
_lowerCAmelCase : Optional[Any] = value
else:
_lowerCAmelCase : int = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Any ,_lowerCamelCase : int ) -> Union[str, Any]:
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
_lowerCAmelCase : Union[str, Any] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : int = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,hf_model.config.feat_extract_norm == """group""" ,)
_lowerCAmelCase : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : Any = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase : Optional[Any] = True
if "*" in mapped_key:
_lowerCAmelCase : str = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase : int = mapped_key.replace("""*""" ,_lowerCamelCase )
if "pos_bias_u" in name:
_lowerCAmelCase : Optional[int] = None
elif "pos_bias_v" in name:
_lowerCAmelCase : Dict = None
elif "weight_g" in name:
_lowerCAmelCase : Optional[Any] = 'weight_g'
elif "weight_v" in name:
_lowerCAmelCase : Dict = 'weight_v'
elif "bias" in name:
_lowerCAmelCase : Tuple = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase : int = 'weight'
elif "running_mean" in name:
_lowerCAmelCase : str = 'running_mean'
elif "inv_freq" in name:
_lowerCAmelCase : List[Any] = 'inv_freq'
elif "running_var" in name:
_lowerCAmelCase : Union[str, Any] = 'running_var'
elif "num_batches_tracked" in name:
_lowerCAmelCase : Optional[Any] = 'num_batches_tracked'
else:
_lowerCAmelCase : List[str] = None
set_recursively(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Any ,_lowerCamelCase : str ) -> Any:
_lowerCAmelCase : str = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : str = name.split(""".""" )
_lowerCAmelCase : Dict = int(items[0] )
_lowerCAmelCase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_lowerCAmelCase : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_lowerCAmelCase : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_lowerCAmelCase : Any = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_lowerCAmelCase : List[str] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Tuple ,_lowerCamelCase : List[Any]=None ,_lowerCamelCase : Union[str, Any]=None ,_lowerCamelCase : Any=True ) -> Any:
if config_path is not None:
_lowerCAmelCase : Tuple = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase ,hidden_act="""swish""" )
else:
_lowerCAmelCase : Optional[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCAmelCase : Dict = 'rotary'
if is_finetuned:
if dict_path:
_lowerCAmelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase : int = target_dict.pad_index
_lowerCAmelCase : List[Any] = target_dict.bos_index
_lowerCAmelCase : Any = target_dict.eos_index
_lowerCAmelCase : Dict = len(target_dict.symbols )
_lowerCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase ,"""vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase ,exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[Any] = 1
with open(_lowerCamelCase ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=_lowerCamelCase ,)
_lowerCAmelCase : Tuple = True if config.feat_extract_norm == 'layer' else False
_lowerCAmelCase : Any = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16000 ,padding_value=0 ,do_normalize=_lowerCamelCase ,return_attention_mask=_lowerCamelCase ,)
_lowerCAmelCase : Optional[int] = WavaVecaProcessor(feature_extractor=_lowerCamelCase ,tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[Any] = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCAmelCase : List[Any] = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCAmelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase : Optional[Any] = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase : str = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCAmelCase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=_lowerCamelCase )
_lowerCAmelCase : Tuple = model[0].eval()
recursively_load_weights(_lowerCamelCase ,_lowerCamelCase ,not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_a : List[str] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
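# Usage sketch (added for illustration; not part of the original script). The
# routine above expects a list of equal-length vectors and a cluster count, and
# returns the final centroid locations plus one cluster index per input vector:
#
#   from numpy import array
#   vectors = [array([1.0, 1.0]), array([1.2, 0.8]), array([9.0, 9.1]), array([8.8, 9.3])]
#   centroids, assignments = SCREAMING_SNAKE_CASE(vectors, 2)  # the function defined above
#   print(centroids)     # two centroid locations
#   print(assignments)   # e.g. [0, 0, 1, 1] (cluster labels may be permuted)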
| 663 | 0 |
"""simple docstring"""
def selection_sort ( collection : list ) -> list:
    # In-place selection sort: on each pass, move the smallest remaining item
    # to the front of the unsorted suffix.
    length : int = len(collection )
    for i in range(length - 1 ):
        least : int = i
        for k in range(i + 1 ,length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input : str = input('Enter numbers separated by a comma:\n').strip()
    unsorted : list[int] = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
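# Doctest-style illustration (added as a sketch; not part of the original script):
# >>> selection_sort([3, 1, 4, 1, 5])
# [1, 1, 3, 4, 5]
# >>> selection_sort([])
# []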
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
import json
import sys
def format_json_to_md ( input_json_file : str ,output_md_file : str ) -> None:
    with open(input_json_file ,encoding="""utf-8""" ) as f:
        results = json.load(f )
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("""/""" )[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}" )
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""" ,None )
            dif_val = metric_vals.get("""diff""" ,None )
            val_str = f" {new_val:f}" if isinstance(new_val ,(int, float) ) else """None"""
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val ,(int, float) ) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val ,(int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("""</details>""" )
    with open(output_md_file ,"""w""" ,encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
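# Input sketch (illustrative; the field names follow the lookups in the function
# above, the file names are made up). The JSON maps each benchmark name to a
# per-metric dict with a "new" value and optional "old" / "diff" entries:
#
#   {
#     "benchmarks/bert_base.json": {
#       "latency_ms": {"new": 12.3, "old": 13.1, "diff": -0.8}
#     }
#   }
#
# Invocation: python format_json_to_md.py results.json results.md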
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
class __A :
def __init__( self , a__ , a__ ):
_lowerCAmelCase : Optional[int] = name
_lowerCAmelCase : Tuple = val
def __str__( self ):
return F"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__( self , a__ ):
return self.val < other.val
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : int = {}
_lowerCAmelCase : Union[str, Any] = self.build_heap(UpperCamelCase_ )
def __getitem__( self , a__ ):
return self.get_value(UpperCamelCase_ )
def __A ( self , a__ ):
return (idx - 1) // 2
def __A ( self , a__ ):
return idx * 2 + 1
def __A ( self , a__ ):
return idx * 2 + 2
def __A ( self , a__ ):
return self.heap_dict[key]
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = len(UpperCamelCase_ ) - 1
_lowerCAmelCase : Tuple = self.get_parent_idx(UpperCamelCase_ )
for idx, i in enumerate(UpperCamelCase_ ):
_lowerCAmelCase : str = idx
_lowerCAmelCase : Any = i.val
for i in range(UpperCamelCase_ , -1 , -1 ):
self.sift_down(UpperCamelCase_ , UpperCamelCase_ )
return array
def __A ( self , a__ , a__ ):
while True:
_lowerCAmelCase : str = self.get_left_child_idx(UpperCamelCase_ ) # noqa: E741
_lowerCAmelCase : Any = self.get_right_child_idx(UpperCamelCase_ )
_lowerCAmelCase : List[str] = idx
if l < len(UpperCamelCase_ ) and array[l] < array[idx]:
_lowerCAmelCase : Optional[Any] = l
if r < len(UpperCamelCase_ ) and array[r] < array[smallest]:
_lowerCAmelCase : List[str] = r
if smallest != idx:
_lowerCAmelCase , _lowerCAmelCase : Dict = array[smallest], array[idx]
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : str = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
_lowerCAmelCase : Dict = smallest
else:
break
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = self.get_parent_idx(UpperCamelCase_ )
while p >= 0 and self.heap[p] > self.heap[idx]:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.heap[idx], self.heap[p]
_lowerCAmelCase , _lowerCAmelCase : int = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
_lowerCAmelCase : List[str] = p
_lowerCAmelCase : Any = self.get_parent_idx(UpperCamelCase_ )
def __A ( self ):
return self.heap[0]
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.heap[-1], self.heap[0]
_lowerCAmelCase , _lowerCAmelCase : str = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
_lowerCAmelCase : Optional[Any] = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0 , self.heap )
return x
def __A ( self , a__ ):
self.heap.append(UpperCamelCase_ )
_lowerCAmelCase : Optional[Any] = len(self.heap ) - 1
_lowerCAmelCase : Union[str, Any] = node.val
self.sift_up(len(self.heap ) - 1 )
def __A ( self ):
return len(self.heap ) == 0
def __A ( self , a__ , a__ ):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
_lowerCAmelCase : Optional[Any] = new_value
_lowerCAmelCase : Optional[Any] = new_value
self.sift_up(self.idx_of_element[node] )
_a : int = Node('R', -1)
_a : str = Node('B', 6)
_a : Union[str, Any] = Node('A', 3)
_a : List[str] = Node('X', 1)
_a : List[Any] = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_a : Any = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : Dict = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_a : Optional[Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_a : Dict = {'facebook/blenderbot_small-90M': 512}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
_lowerCAmelCase : Any = set()
_lowerCAmelCase : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : int = char
_lowerCAmelCase : List[Any] = set(lowerCamelCase__ )
return pairs
class __A ( _UpperCamelCase ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Any = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__ , a__="__start__" , a__="__end__" , a__="__unk__" , a__="__null__" , **a__ , ):
super().__init__(unk_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , **a__ )
with open(a__ , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase : str = json.load(a__ )
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(a__ , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase : Optional[int] = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase : str = [tuple(merge.split() ) for merge in merges]
_lowerCAmelCase : List[str] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : Optional[Any] = {}
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , a__ ):
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Optional[int] = re.sub("""([.,!?()])""" , r""" \1""" , a__ )
_lowerCAmelCase : List[Any] = re.sub("""(')""" , r""" \1 """ , a__ )
_lowerCAmelCase : Optional[int] = re.sub(r"""\s{2,}""" , """ """ , a__ )
if "\n" in token:
_lowerCAmelCase : Union[str, Any] = token.replace("""\n""" , """ __newln__""" )
_lowerCAmelCase : str = token.split(""" """ )
_lowerCAmelCase : int = []
for token in tokens:
if not len(a__ ):
continue
_lowerCAmelCase : List[Any] = token.lower()
_lowerCAmelCase : Optional[Any] = tuple(a__ )
_lowerCAmelCase : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_lowerCAmelCase : str = get_pairs(a__ )
if not pairs:
words.append(a__ )
continue
while True:
_lowerCAmelCase : List[Any] = min(a__ , key=lambda a__ : self.bpe_ranks.get(a__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase : Optional[Any] = bigram
_lowerCAmelCase : str = []
_lowerCAmelCase : List[Any] = 0
while i < len(a__ ):
try:
_lowerCAmelCase : Dict = word.index(a__ , a__ )
new_word.extend(word[i:j] )
_lowerCAmelCase : int = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(a__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Union[str, Any] = tuple(a__ )
_lowerCAmelCase : Optional[Any] = new_word
if len(a__ ) == 1:
break
else:
_lowerCAmelCase : List[Any] = get_pairs(a__ )
_lowerCAmelCase : Optional[Any] = "@@ ".join(a__ )
_lowerCAmelCase : Any = word[:-4]
_lowerCAmelCase : Dict = word
words.append(a__ )
return " ".join(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[Any] = re.findall(r"""\S+\n?""" , a__ )
for token in words:
split_tokens.extend(list(self.bpe(a__ ).split(""" """ ) ) )
return split_tokens
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = token.lower()
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def __A ( self , a__ ):
return self.decoder.get(a__ , self.unk_token )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = " ".join(a__ ).replace("""@@ """ , """""" ).strip()
return out_string
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : int = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : str = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(a__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a__ , ensure_ascii=a__ ) + """\n""" )
_lowerCAmelCase : Optional[Any] = 0
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a__ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase : Dict = token_index
writer.write(""" """.join(a__ ) + """\n""" )
index += 1
return vocab_file, merge_file
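    # Usage sketch (added for illustration). In the transformers library this
    # tokenizer is exposed as BlenderbotSmallTokenizer; the checkpoint name below
    # is the one referenced in the pretrained maps at the top of this file:
    #
    #   tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    #   tokenizer.tokenize("sam is a great name")        # lowercased, BPE-split subword tokens
    #   tokenizer("sam is a great name")["input_ids"]    # the corresponding vocabulary ids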
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : List[str] = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "xlm-prophetnet"
_UpperCamelCase : List[str] = ["past_key_values"]
_UpperCamelCase : List[Any] = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self , a__ = 0.1 , a__ = "gelu" , a__ = 30522 , a__ = 1024 , a__ = 4096 , a__ = 12 , a__ = 16 , a__ = 4096 , a__ = 12 , a__ = 16 , a__ = 0.1 , a__ = 0.1 , a__ = 512 , a__ = 0.0_2 , a__ = True , a__ = True , a__ = 0 , a__ = 2 , a__ = 32 , a__ = 128 , a__ = False , a__ = 0.0 , a__ = True , a__ = 0 , a__ = 1 , a__ = 2 , **a__ , ):
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Any = hidden_size
_lowerCAmelCase : str = encoder_ffn_dim
_lowerCAmelCase : Union[str, Any] = num_encoder_layers
_lowerCAmelCase : Dict = num_encoder_attention_heads
_lowerCAmelCase : Optional[Any] = decoder_ffn_dim
_lowerCAmelCase : List[str] = num_decoder_layers
_lowerCAmelCase : Any = num_decoder_attention_heads
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Dict = init_std # Normal(0, this parameter)
_lowerCAmelCase : List[Any] = activation_function
# parameters for xlmprophetnet
_lowerCAmelCase : Union[str, Any] = ngram
_lowerCAmelCase : Union[str, Any] = num_buckets
_lowerCAmelCase : int = relative_max_distance
_lowerCAmelCase : List[str] = disable_ngram_loss
_lowerCAmelCase : Optional[int] = eps
# 3 Types of Dropout
_lowerCAmelCase : Optional[int] = attention_dropout
_lowerCAmelCase : Tuple = activation_dropout
_lowerCAmelCase : Tuple = dropout
_lowerCAmelCase : Tuple = use_cache
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
@property
def __A ( self ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def __A ( self , a__ ):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""" )
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian ( matrix : np.ndarray ) -> bool:
    # A matrix is Hermitian when it equals its own conjugate transpose.
    return np.array_equal(matrix ,matrix.conjugate().T )
def rayleigh_quotient ( a : np.ndarray ,v : np.ndarray ) -> Any:
    # Rayleigh quotient R(A, v) = (v* A v) / (v* v) for a Hermitian matrix A
    # and a non-zero column vector v.
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot ,np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests ( ) -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    print(rayleigh_quotient(a ,v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    assert rayleigh_quotient(a ,v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
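# Worked example (added for clarity): for the real symmetric matrix
# A = [[1, 2, 4], [2, 3, -1], [4, -1, 1]] and v = [1, 2, 3]^T,
# A v = [17, 5, 5]^T, so v^T A v = 42 and v^T v = 14, giving a Rayleigh
# quotient of 42 / 14 = 3, which is what tests() asserts above.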
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , FutureWarning , )
super().__init__(*a__ , **a__ )
| 663 | 0 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number ( phone : str ) -> bool:
    # Accepts numbers starting with 0, 94, +94 or 0094, followed by a mobile
    # prefix 70-78 (excluding 73), an optional separator and seven digits.
    pattern = re.compile(
        r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
    return bool(re.search(pattern ,phone ) )
if __name__ == "__main__":
    phone = '0094702343221'
    print(is_sri_lankan_phone_number(phone))
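# Illustrative checks (added as a sketch; not part of the original script):
# >>> is_sri_lankan_phone_number('+94773283048')
# True
# >>> is_sri_lankan_phone_number('0718382399')
# True
# >>> is_sri_lankan_phone_number('075469874')   # only six digits after the prefix
# False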
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status ( target_runners : list ,token : str ) -> None:
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd ,shell=True ,stdout=subprocess.PIPE )
    stdout = output.stdout.decode("""utf-8""" )
    status = json.loads(stdout )
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""" ,"""w""" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners] )
        raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
    def list_str ( values : str ) -> list:
        return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch ( openai_checkpoint_folder_path : str ,openai_config_file : str ,pytorch_dump_folder_path : str ) -> None:
    # Construct the model config, then load the TF weights into a fresh model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model ,config ,openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() ,pytorch_weights_dump_path )
    print(f"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path ,"""w""" ,encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
_a : Union[str, Any] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
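# Example invocation (an illustrative sketch; the script name and paths are made up):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch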
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F"Accelerated optimizer pickling failed with {e}" )
        AcceleratorState._reset_state()
| 720 |
"""simple docstring"""
def solution ( limit : int = 1000000 ) -> int:
    # Totient sieve: phi[i] starts at i - 1; whenever i turns out to be prime
    # (phi[i] still equals i - 1), every multiple j of i is scaled by (1 - 1/i)
    # via phi[j] -= phi[j] // i.
    phi : list[int] = [i - 1 for i in range(limit + 1 )]
    for i in range(2 ,limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i ,limit + 1 ,i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
    print(solution())
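# The returned value is the sum of Euler's totient over 2..limit, i.e. the number
# of reduced proper fractions n/d with d <= limit (Project Euler 72). Small
# sanity check (added as a sketch):
# >>> solution(8)
# 21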
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[list[float]] ) -> Optional[int]:
_lowerCAmelCase : list[list[float]] = []
for data in source_data:
for i, el in enumerate(_lowerCamelCase ):
if len(_lowerCamelCase ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(_lowerCamelCase ) )
return data_lists
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[list[float]] ,_lowerCamelCase : list[int] ) -> Dict:
_lowerCAmelCase : list[list[float]] = []
for dlist, weight in zip(_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = min(_lowerCamelCase )
_lowerCAmelCase : Tuple = max(_lowerCamelCase )
_lowerCAmelCase : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_lowerCAmelCase : Optional[int] = f"Invalid weight of {weight:f} provided"
raise ValueError(_lowerCamelCase )
score_lists.append(_lowerCamelCase )
return score_lists
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[list[float]] ) -> str:
_lowerCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = final_scores[j] + ele
return final_scores
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[list[float]] ,_lowerCamelCase : list[int] ) -> List[Any]:
_lowerCAmelCase : List[str] = get_data(_lowerCamelCase )
_lowerCAmelCase : List[Any] = calculate_each_score(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = generate_final_scores(_lowerCamelCase )
# append scores to source data
for i, ele in enumerate(_lowerCamelCase ):
source_data[i].append(_lowerCamelCase )
return source_data
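# Usage sketch (added for illustration; upstream this last routine is usually
# called `procentual_proximity`, while in this file every function carries the
# renamed identifier SCREAMING_SNAKE_CASE). Each inner list holds one
# alternative's raw metrics, and each weight is 0 (lower is better) or
# 1 (higher is better); the aggregated score is appended to every row:
#
#   source_data = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   weights = [0, 0, 1]
#   procentual_proximity(source_data, weights)
#   # -> every row now ends with a score between 0 and len(weights)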
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_a : List[Any] = NewType('DataClass', Any)
_a : Tuple = NewType('DataClassType', Any)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> str:
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ) -> Callable[[str], Any]:
_lowerCAmelCase : Optional[Any] = {str(_lowerCamelCase ): choice for choice in choices}
return lambda _lowerCamelCase : str_to_choice.get(_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( *,
_lowerCamelCase : Union[str, List[str]] = None ,_lowerCamelCase : str = None ,_lowerCamelCase : Any = dataclasses.MISSING ,_lowerCamelCase : Callable[[], Any] = dataclasses.MISSING ,_lowerCamelCase : dict = None ,**_lowerCamelCase : Optional[Any] ,) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
_lowerCAmelCase : Tuple = {}
if aliases is not None:
_lowerCAmelCase : Dict = aliases
if help is not None:
_lowerCAmelCase : Optional[Any] = help
return dataclasses.field(metadata=_lowerCamelCase ,default=_lowerCamelCase ,default_factory=_lowerCamelCase ,**_lowerCamelCase )
class __A ( lowercase__ ):
_UpperCamelCase : int = 42
def __init__( self , a__ , **a__ ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_lowerCAmelCase : Dict = ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase__ )
if dataclasses.is_dataclass(UpperCAmelCase__ ):
_lowerCAmelCase : List[str] = [dataclass_types]
_lowerCAmelCase : Any = list(UpperCAmelCase__ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase__ )
@staticmethod
def __A ( a__ , a__ ):
_lowerCAmelCase : Optional[int] = F"--{field.name}"
_lowerCAmelCase : Optional[int] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase__ ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
_lowerCAmelCase : Dict = kwargs.pop("""aliases""" , [] )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_lowerCAmelCase : Dict = [aliases]
_lowerCAmelCase : Union[str, Any] = getattr(field.type , """__origin__""" , field.type )
if origin_type is Union or (hasattr(UpperCAmelCase__ , """UnionType""" ) and isinstance(UpperCAmelCase__ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase__ ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
F" Problem encountered in field \'{field.name}\'." )
if type(UpperCAmelCase__ ) not in field.type.__args__:
# filter `str` in Union
_lowerCAmelCase : Optional[int] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_lowerCAmelCase : Union[str, Any] = getattr(field.type , """__origin__""" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_lowerCAmelCase : Dict = (
field.type.__args__[0] if isinstance(UpperCAmelCase__ , field.type.__args__[1] ) else field.type.__args__[1]
)
_lowerCAmelCase : Optional[int] = getattr(field.type , """__origin__""" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_lowerCAmelCase : Union[str, Any] = {}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase__ ) and issubclass(field.type , UpperCAmelCase__ )):
if origin_type is Literal:
_lowerCAmelCase : Tuple = field.type.__args__
else:
_lowerCAmelCase : Optional[Any] = [x.value for x in field.type]
_lowerCAmelCase : str = make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
_lowerCAmelCase : List[Any] = field.default
else:
_lowerCAmelCase : List[str] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_lowerCAmelCase : Optional[Any] = copy(UpperCAmelCase__ )
# Hack because type=bool in argparse does not behave as we want.
_lowerCAmelCase : str = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_lowerCAmelCase : List[str] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_lowerCAmelCase : int = default
# This tells argparse we accept 0 or 1 value after --field_name
_lowerCAmelCase : List[str] = '''?'''
# This is the value that will get picked if we do --field_name (without value)
_lowerCAmelCase : str = True
elif isclass(UpperCAmelCase__ ) and issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
_lowerCAmelCase : int = field.type.__args__[0]
_lowerCAmelCase : Optional[int] = '''+'''
if field.default_factory is not dataclasses.MISSING:
_lowerCAmelCase : List[str] = field.default_factory()
elif field.default is dataclasses.MISSING:
_lowerCAmelCase : Union[str, Any] = True
else:
_lowerCAmelCase : List[Any] = field.type
if field.default is not dataclasses.MISSING:
_lowerCAmelCase : Any = field.default
elif field.default_factory is not dataclasses.MISSING:
_lowerCAmelCase : Dict = field.default_factory()
else:
_lowerCAmelCase : List[str] = True
parser.add_argument(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_lowerCAmelCase : List[Any] = False
parser.add_argument(F"--no_{field.name}" , action="""store_false""" , dest=field.name , **UpperCAmelCase__ )
def __A ( self , a__ ):
if hasattr(UpperCAmelCase__ , """_argument_group_name""" ):
_lowerCAmelCase : List[Any] = self.add_argument_group(dtype._argument_group_name )
else:
_lowerCAmelCase : str = self
try:
_lowerCAmelCase : Dict[str, type] = get_type_hints(UpperCAmelCase__ )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase__ ):
_lowerCAmelCase : Dict = '''.'''.join(map(UpperCAmelCase__ , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""" ) from ex
raise
for field in dataclasses.fields(UpperCAmelCase__ ):
if not field.init:
continue
_lowerCAmelCase : int = type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase__ , UpperCAmelCase__ )
def __A ( self , a__=None , a__=False , a__=True , a__=None , a__=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_lowerCAmelCase : Optional[Any] = []
if args_filename:
args_files.append(Path(UpperCAmelCase__ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_lowerCAmelCase : int = ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase__ , type=UpperCAmelCase__ , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
_lowerCAmelCase : Any = args_file_parser.parse_known_args(args=UpperCAmelCase__ )
_lowerCAmelCase : str = vars(UpperCAmelCase__ ).get(args_file_flag.lstrip("""-""" ) , UpperCAmelCase__ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase__ ) for p in cmd_args_file_paths] )
_lowerCAmelCase : List[Any] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_lowerCAmelCase : Optional[Any] = file_args + args if args is not None else file_args + sys.argv[1:]
_lowerCAmelCase : Union[str, Any] = self.parse_known_args(args=UpperCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = []
for dtype in self.dataclass_types:
_lowerCAmelCase : int = {f.name for f in dataclasses.fields(UpperCAmelCase__ ) if f.init}
_lowerCAmelCase : int = {k: v for k, v in vars(UpperCAmelCase__ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase__ , UpperCAmelCase__ )
_lowerCAmelCase : Union[str, Any] = dtype(**UpperCAmelCase__ )
outputs.append(UpperCAmelCase__ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(UpperCAmelCase__ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"Some specified arguments are not used by the HfArgumentParser: {remaining_args}" )
return (*outputs,)
def __A ( self , a__ , a__ = False ):
_lowerCAmelCase : List[str] = set(args.keys() )
_lowerCAmelCase : Dict = []
for dtype in self.dataclass_types:
_lowerCAmelCase : int = {f.name for f in dataclasses.fields(UpperCAmelCase__ ) if f.init}
_lowerCAmelCase : Dict = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_lowerCAmelCase : str = dtype(**UpperCAmelCase__ )
outputs.append(UpperCAmelCase__ )
if not allow_extra_keys and unused_keys:
raise ValueError(F"Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase__ )}" )
return tuple(UpperCAmelCase__ )
def __A ( self , a__ , a__ = False ):
with open(Path(UpperCAmelCase__ ) , encoding="""utf-8""" ) as open_json_file:
_lowerCAmelCase : str = json.loads(open_json_file.read() )
_lowerCAmelCase : Tuple = self.parse_dict(UpperCAmelCase__ , allow_extra_keys=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
def __A ( self , a__ , a__ = False ):
_lowerCAmelCase : Union[str, Any] = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase__ ).read_text() ) , allow_extra_keys=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
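    # Usage sketch (added for illustration; this mirrors how transformers.HfArgumentParser
    # is normally driven -- in this file the class and its methods appear under renamed
    # identifiers):
    #
    #   @dataclasses.dataclass
    #   class TrainingConfig:
    #       learning_rate: float = 3e-4
    #       use_fp16: bool = False
    #
    #   parser = HfArgumentParser(TrainingConfig)
    #   (config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--use_fp16"])
    #   # config.learning_rate == 1e-4 and config.use_fp16 is True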
| 700 |
"""simple docstring"""
from PIL import Image
def change_contrast ( img : Image , level : int ) -> Image:
    # Standard contrast-correction factor; level is expected in [-255, 255].
    factor : float = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c : int ) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
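# Note (added explanation): the 259/255 factor maps level = 0 to factor = 1 (no
# change); positive levels give factor > 1, pushing channel values away from the
# midpoint 128, and negative levels give factor < 1, pulling them towards it.
# With level = 170 the factor is roughly 4.85.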
| 663 | 0 |
"""simple docstring"""
class __A :
def __init__( self , a__ ):
_lowerCAmelCase : Union[str, Any] = n
_lowerCAmelCase : List[str] = [None] * self.n
_lowerCAmelCase : Optional[int] = 0 # index of the first element
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = 0
def __len__( self ):
return self.size
def __A ( self ):
return self.size == 0
def __A ( self ):
return False if self.is_empty() else self.array[self.front]
def __A ( self , a__ ):
if self.size >= self.n:
raise Exception("""QUEUE IS FULL""" )
_lowerCAmelCase : Tuple = data
_lowerCAmelCase : List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def __A ( self ):
if self.size == 0:
raise Exception("""UNDERFLOW""" )
_lowerCAmelCase : List[str] = self.array[self.front]
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Optional[int] = (self.front + 1) % self.n
self.size -= 1
return temp
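# A minimal standalone sketch of the same fixed-size ring-buffer idea (independent of
# the class above; names, capacity, and values are illustrative). The key point is that
# both the front and rear indices advance modulo the capacity, so freed slots are reused.
def _ring_buffer_demo(capacity: int = 3):
    array = [None] * capacity
    front = rear = size = 0
    history = []
    for value in [10, 20, 30]:  # fill the buffer
        array[rear] = value
        rear = (rear + 1) % capacity
        size += 1
    for _ in range(2):  # free two slots from the front
        history.append(array[front])
        array[front] = None
        front = (front + 1) % capacity
        size -= 1
    array[rear] = 40  # rear wraps back to index 0
    rear = (rear + 1) % capacity
    size += 1
    return history, array, front, rear, size


# _ring_buffer_demo() -> ([10, 20], [40, None, 30], 2, 1, 2)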
| 701 |
"""simple docstring"""
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A :
def __init__( self ):
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
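# Note on the demos above (behaviour inferred from the code, not from running it here):
# the fixed-priority demo drains priority 0 first (10, 100, 128), then priority 1
# (70, 7, 64), then priority 2 (1, 5, 4); the element demo always pops the smallest
# remaining value. Each demo issues ten dequeues for nine enqueued items, so the final
# call raises the underflow error.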
| 663 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=lowerCamelCase__ ):
_UpperCamelCase : int = ["transformers", "torch", "note_seq"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def __A ( cls , *a__ , **a__ ):
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
        # test with an audio array loaded from a dummy dataset
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( UpperCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : str = RobertaTokenizer
_UpperCamelCase : Optional[Any] = RobertaTokenizerFast
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[Any] = {"cls_token": "<s>"}
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowerCAmelCase : List[str] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_lowerCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Optional[int] = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_lowercase ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = """lower newer"""
_lowerCAmelCase : int = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Dict = tokenizer.tokenize(_lowercase ) # , add_prefix_space=True)
self.assertListEqual(_lowercase , _lowercase )
_lowerCAmelCase : List[str] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def __A ( self ):
_lowerCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=_lowercase ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=_lowercase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def __A ( self ):
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained("""roberta-base""" )
_lowerCAmelCase : str = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowercase )
_lowerCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowercase )
_lowerCAmelCase : str = tokenizer.encode(
"""sequence builders""" , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
_lowerCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(_lowercase )
_lowerCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __A ( self ):
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = """Encode this sequence."""
_lowerCAmelCase : Dict = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
_lowerCAmelCase : int = tokenizer.encode(_lowercase , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_lowercase , _lowercase )
_lowerCAmelCase : List[str] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
_lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_lowercase , _lowercase )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
_lowerCAmelCase : List[str] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_lowercase , _lowercase )
# Testing spaces after special tokens
_lowerCAmelCase : Tuple = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase )} ) # mask token has a left space
_lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(_lowercase )
_lowerCAmelCase : Union[str, Any] = """Encode <mask> sequence"""
_lowerCAmelCase : List[str] = """Encode <mask>sequence"""
_lowerCAmelCase : int = tokenizer.encode(_lowercase )
_lowerCAmelCase : int = encoded.index(_lowercase )
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_lowercase , _lowercase )
_lowerCAmelCase : Any = tokenizer.encode(_lowercase )
_lowerCAmelCase : Optional[int] = encoded.index(_lowercase )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_lowercase , _lowercase )
def __A ( self ):
pass
def __A ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_lowerCAmelCase : str = """A, <mask> AllenNLP sentence."""
_lowerCAmelCase : Optional[Any] = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
_lowerCAmelCase : Optional[int] = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
_lowerCAmelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_lowerCAmelCase : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
            # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
_lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def __A ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : List[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowerCAmelCase : List[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , _lowercase )
self.assertEqual(post_processor_state["""add_prefix_space"""] , _lowercase )
self.assertEqual(post_processor_state["""trim_offsets"""] , _lowercase )
def __A ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCAmelCase : Dict = F"{text_of_1_token} {text_of_1_token}"
_lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : str = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : Tuple = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : Dict = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ), len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : Optional[Any] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ), len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCAmelCase : List[Any] = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : Dict = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ) + 1, 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : Any = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ), 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
_lowerCAmelCase : List[str] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ), 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
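# A rough worked example of the downscaling helper above (illustrative numbers): with
# scale_factor=8, height=512 gives 512 // 8**2 = 8 with no remainder, so the helper
# returns 8 * 8 = 64; height=520 leaves a remainder, is rounded up to 9 blocks, and
# returns 72. Width is handled the same way.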
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : Dict = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __A ( snake_case__ ):
_UpperCamelCase : Dict = "speech_to_text_2"
_UpperCamelCase : int = ["past_key_values"]
_UpperCamelCase : Any = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , a__=10000 , a__=6 , a__=2048 , a__=4 , a__=0.0 , a__=True , a__="relu" , a__=256 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=2 , a__=True , a__=1 , a__=0 , a__=2 , a__=1024 , **a__ , ):
_lowerCAmelCase : Union[str, Any] = vocab_size
_lowerCAmelCase : Dict = d_model
_lowerCAmelCase : int = decoder_ffn_dim
_lowerCAmelCase : Optional[Any] = decoder_layers
_lowerCAmelCase : Optional[Any] = decoder_attention_heads
_lowerCAmelCase : int = dropout
_lowerCAmelCase : str = attention_dropout
_lowerCAmelCase : Dict = activation_dropout
_lowerCAmelCase : Optional[int] = activation_function
_lowerCAmelCase : Any = init_std
_lowerCAmelCase : Union[str, Any] = decoder_layerdrop
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : Dict = decoder_layers
_lowerCAmelCase : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCAmelCase : Dict = max_target_positions
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ,_lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
# Load configuration defined in the metadata file
with open(_lowerCamelCase ) as metadata_file:
_lowerCAmelCase : List[str] = json.load(_lowerCamelCase )
_lowerCAmelCase : int = LukeConfig(use_entity_aware_attention=_lowerCamelCase ,**metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_lowerCAmelCase : List[Any] = torch.load(_lowerCamelCase ,map_location="""cpu""" )["""module"""]
# Load the entity vocab file
_lowerCAmelCase : Any = load_original_entity_vocab(_lowerCamelCase )
# add an entry for [MASK2]
_lowerCAmelCase : Optional[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_lowerCAmelCase : Dict = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCAmelCase : Optional[int] = AddedToken("""<ent>""" ,lstrip=_lowerCamelCase ,rstrip=_lowerCamelCase )
_lowerCAmelCase : Dict = AddedToken("""<ent2>""" ,lstrip=_lowerCamelCase ,rstrip=_lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,"""tokenizer_config.json""" ) ,"""r""" ) as f:
_lowerCAmelCase : int = json.load(_lowerCamelCase )
_lowerCAmelCase : Dict = """MLukeTokenizer"""
with open(os.path.join(_lowerCamelCase ,"""tokenizer_config.json""" ) ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
json.dump(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Dict = MLukeTokenizer.from_pretrained(_lowerCamelCase )
# Initialize the embeddings of the special tokens
_lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
_lowerCAmelCase : int = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
_lowerCAmelCase : Any = state_dict["""embeddings.word_embeddings.weight"""]
_lowerCAmelCase : str = word_emb[ent_init_index].unsqueeze(0 )
_lowerCAmelCase : Optional[Any] = word_emb[enta_init_index].unsqueeze(0 )
_lowerCAmelCase : Dict = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_lowerCAmelCase : Tuple = state_dict[bias_name]
_lowerCAmelCase : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
_lowerCAmelCase : int = decoder_bias[enta_init_index].unsqueeze(0 )
_lowerCAmelCase : Optional[Any] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCAmelCase : Any = f"encoder.layer.{layer_index}.attention.self."
_lowerCAmelCase : Dict = state_dict[prefix + matrix_name]
_lowerCAmelCase : int = state_dict[prefix + matrix_name]
_lowerCAmelCase : int = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCAmelCase : str = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_lowerCAmelCase : Optional[int] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowerCAmelCase : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_lowerCAmelCase : List[str] = state_dict["""entity_predictions.bias"""]
_lowerCAmelCase : List[str] = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_lowerCAmelCase : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
_lowerCAmelCase : Optional[int] = LukeForMaskedLM(config=_lowerCamelCase ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
_lowerCAmelCase : List[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
_lowerCAmelCase : Optional[Any] = state_dict[key]
else:
_lowerCAmelCase : Dict = state_dict[key]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model.load_state_dict(_lowerCamelCase ,strict=_lowerCamelCase )
if set(_lowerCamelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(_lowerCamelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_lowerCAmelCase : Union[str, Any] = MLukeTokenizer.from_pretrained(_lowerCamelCase ,task="""entity_classification""" )
_lowerCAmelCase : Union[str, Any] = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
_lowerCAmelCase : int = (0, 9)
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase ,entity_spans=[span] ,return_tensors="""pt""" )
_lowerCAmelCase : List[str] = model(**_lowerCamelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase : Tuple = torch.Size((1, 33, 768) )
_lowerCAmelCase : List[str] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_lowerCamelCase ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCAmelCase : List[Any] = torch.Size((1, 1, 768) )
_lowerCAmelCase : str = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,_lowerCamelCase ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_lowerCAmelCase : int = MLukeTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[Any] = """Tokyo is the capital of <mask>."""
_lowerCAmelCase : Tuple = (24, 30)
_lowerCAmelCase : Any = tokenizer(_lowerCamelCase ,entity_spans=[span] ,return_tensors="""pt""" )
_lowerCAmelCase : List[str] = model(**_lowerCamelCase )
_lowerCAmelCase : Tuple = encoding["""input_ids"""][0].tolist()
_lowerCAmelCase : Any = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
_lowerCAmelCase : str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_lowerCamelCase )
_lowerCAmelCase : str = outputs.entity_logits[0][0].argmax().item()
_lowerCAmelCase : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_lowerCamelCase ) )
model.save_pretrained(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Tuple:
_lowerCAmelCase : Optional[Any] = ["""[MASK]""", """[PAD]""", """[UNK]"""]
_lowerCAmelCase : Any = [json.loads(_lowerCamelCase ) for line in open(_lowerCamelCase )]
_lowerCAmelCase : List[Any] = {}
for entry in data:
_lowerCAmelCase : Dict = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_lowerCAmelCase : Optional[int] = entity_id
break
_lowerCAmelCase : Optional[Any] = f"{language}:{entity_name}"
_lowerCAmelCase : Optional[Any] = entity_id
return new_mapping
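# A rough sketch of the jsonl format the loader above expects (field names inferred
# from the loop; ids and entity names are illustrative):
#   {"id": 0, "entities": [["[MASK]", "en"]]}
#   {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]}
# Special tokens keep their bare name as the mapping key; every other entry is keyed
# as "language:entity_name", e.g. "en:Japan" -> 1.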
if __name__ == "__main__":
_a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
_a : Union[str, Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_a : Optional[int] = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
        # It's very difficult to mix/test pretokenization with byte-level and to get both
        # CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
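        # `truncate_before_pattern` trims the decoded text at the first match of any of the given
        # regular expressions (a new "#" comment, the end-of-text token, a docstring opener, or a run
        # of blank lines), which is how a CodeGen completion is cut down to a single snippet.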
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : Optional[int] = logging.get_logger(__name__)
_a : Dict = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "resnet"
_UpperCamelCase : Optional[int] = ["basic", "bottleneck"]
def __init__( self , a__=3 , a__=64 , a__=[256, 512, 1024, 2048] , a__=[3, 4, 6, 3] , a__="bottleneck" , a__="relu" , a__=False , a__=None , a__=None , **a__ , ):
super().__init__(**__UpperCamelCase )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : int = embedding_size
_lowerCAmelCase : Dict = hidden_sizes
_lowerCAmelCase : List[str] = depths
_lowerCAmelCase : List[str] = layer_type
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[int] = downsample_in_first_stage
_lowerCAmelCase : List[Any] = ["""stem"""] + [F"stage{idx}" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = version.parse("1.11" )
@property
def __A ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __A ( self ):
return 1e-3
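# Usage sketch: the configuration above mirrors `transformers.ResNetConfig`, so the library class can
# illustrate how `out_features`/`out_indices` are aligned against the generated stage names
# (treat the printed values as expectations, not guarantees).
if __name__ == "__main__":
    from transformers import ResNetConfig

    config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck", out_features=["stage2", "stage4"])
    print(config.stage_names)   # ["stem", "stage1", "stage2", "stage3", "stage4"]
    print(config.out_features)  # ["stage2", "stage4"]
    print(config.out_indices)   # indices of the requested stages, e.g. [2, 4]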
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
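# Example invocation through fire (a sketch; the script file name is hypothetical):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Leaving --save_path out overwrites the source checkpoint in place, as noted above.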
| 708 |
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
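# Minimal sanity check for the predicate above (a sketch that only runs when executed directly):
if __name__ == "__main__":
    assert perfect(6) and perfect(28)  # 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14
    assert not perfect(27)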
| 663 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Union[str, Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_lowerCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
_lowerCAmelCase : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_lowerCAmelCase : int = {'unk_token': '<unk>'}
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(UpperCamelCase__ ) )
_lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def __A ( self , **a__ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self , **a__ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self , **a__ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ):
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : str = self.get_rust_tokenizer()
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
_lowerCAmelCase : List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase : List[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def __A ( self ):
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
_lowerCAmelCase : int = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : int = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
_lowerCAmelCase : str = self.prepare_image_inputs()
_lowerCAmelCase : Optional[int] = image_processor(UpperCamelCase__ , return_tensors="""np""" )
_lowerCAmelCase : Union[str, Any] = processor(images=UpperCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.get_image_processor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
_lowerCAmelCase : Tuple = 'lower newer'
_lowerCAmelCase : Dict = processor(text=UpperCamelCase__ )
_lowerCAmelCase : List[str] = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ):
_lowerCAmelCase : List[str] = self.get_image_processor()
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
_lowerCAmelCase : str = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
_lowerCAmelCase : Optional[Any] = 'lower newer'
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
_lowerCAmelCase : Dict = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : List[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = processor(images=UpperCamelCase__ , visual_prompt=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def __A ( self ):
_lowerCAmelCase : List[str] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
_lowerCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : Union[str, Any] = processor.batch_decode(UpperCamelCase__ )
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)
    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)
    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
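# Worked example for the sample tree built by make_tree() (1 at the root, 2 and 3 as children,
# 4 and 5 under 2) — a small self-check sketch that only runs when the module is executed directly:
if __name__ == "__main__":
    sample = make_tree()
    assert inorder(sample) == [4, 2, 5, 1, 3]
    assert preorder(sample) == [1, 2, 4, 5, 3]
    assert postorder(sample) == [4, 5, 2, 3, 1]
    assert list(level_order(sample)) == [1, 2, 3, 4, 5]
    assert zigzag(sample) == [[1], [3, 2], [4, 5]]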
| 663 | 0 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_a : List[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : Optional[datasets.Features] = None
_UpperCamelCase : str = "utf-8"
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : bool = True # deprecated
_UpperCamelCase : Optional[int] = None # deprecated
_UpperCamelCase : int = 10 << 20 # 10MB
_UpperCamelCase : Optional[bool] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : Union[str, Any] = JsonConfig
def __A ( self ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
_lowerCAmelCase : Optional[Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase_ , (str, list, tuple) ):
_lowerCAmelCase : Tuple = data_files
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowerCAmelCase : Optional[Any] = [files]
_lowerCAmelCase : Optional[Any] = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Tuple = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowerCAmelCase : Dict = [files]
_lowerCAmelCase : Union[str, Any] = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase_ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
_lowerCAmelCase : List[str] = self.config.features.arrow_schema.field(lowerCamelCase_ ).type
_lowerCAmelCase : Dict = pa_table.append_column(lowerCamelCase_ , pa.array([None] * len(lowerCamelCase_ ) , type=lowerCamelCase_ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : List[str] = table_cast(lowerCamelCase_ , self.config.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase_ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(lowerCamelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_lowerCAmelCase : str = json.load(lowerCamelCase_ )
# We keep only the field we are interested in
_lowerCAmelCase : Dict = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(lowerCamelCase_ , (list, tuple) ):
_lowerCAmelCase : Optional[Any] = set().union(*[row.keys() for row in dataset] )
_lowerCAmelCase : str = {col: [row.get(lowerCamelCase_ ) for row in dataset] for col in keys}
else:
_lowerCAmelCase : int = dataset
_lowerCAmelCase : Any = pa.Table.from_pydict(lowerCamelCase_ )
yield file_idx, self._cast_table(lowerCamelCase_ )
# If the file has one json object per line
else:
with open(lowerCamelCase_ , """rb""" ) as f:
_lowerCAmelCase : Union[str, Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
_lowerCAmelCase : Union[str, Any] = max(self.config.chunksize // 32 , 16 << 10 )
_lowerCAmelCase : Tuple = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
_lowerCAmelCase : Optional[Any] = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(lowerCamelCase_ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
_lowerCAmelCase : Any = batch.decode(self.config.encoding , errors=lowerCamelCase_ ).encode("""utf-8""" )
try:
while True:
try:
_lowerCAmelCase : Union[str, Any] = paj.read_json(
io.BytesIO(lowerCamelCase_ ) , read_options=paj.ReadOptions(block_size=lowerCamelCase_ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(lowerCamelCase_ , pa.ArrowInvalid )
and "straddling" not in str(lowerCamelCase_ )
or block_size > len(lowerCamelCase_ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"Batch of {len(lowerCamelCase_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
lowerCamelCase_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
_lowerCAmelCase : str = json.load(lowerCamelCase_ )
except json.JSONDecodeError:
logger.error(F"Failed to read file '{file}' with error {type(lowerCamelCase_ )}: {e}" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(lowerCamelCase_ , lowerCamelCase_ ): # list is the only sequence type supported in JSON
try:
_lowerCAmelCase : List[Any] = set().union(*[row.keys() for row in dataset] )
_lowerCAmelCase : List[Any] = {col: [row.get(lowerCamelCase_ ) for row in dataset] for col in keys}
_lowerCAmelCase : Optional[int] = pa.Table.from_pydict(lowerCamelCase_ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"Failed to read file '{file}' with error {type(lowerCamelCase_ )}: {e}" )
raise ValueError(F"Not able to read records in the JSON file at {file}." ) from None
yield file_idx, self._cast_table(lowerCamelCase_ )
break
else:
logger.error(F"Failed to read file '{file}' with error {type(lowerCamelCase_ )}: {e}" )
raise ValueError(
F"Not able to read records in the JSON file at {file}. "
F"You should probably indicate the field of the JSON file containing your records. "
F"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCamelCase_ )
batch_idx += 1
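# Standalone illustration of the two record layouts the loader accepts (a sketch, not part of the
# builder): a list of dicts is normalized into a dict of lists before building the Arrow table,
# with missing keys filled as nulls.
if __name__ == "__main__":
    rows = [{"text": "hello", "label": 0}, {"text": "world"}]  # list of dicts, second row lacks "label"
    keys = set().union(*[row.keys() for row in rows])
    mapping = {col: [row.get(col) for row in rows] for col in keys}  # dict of lists
    table = pa.Table.from_pydict(mapping)
    print(table.column_names)  # "text" and "label" in some order; the missing value becomes null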
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 663 | 0 |
"""simple docstring"""
def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum element in the unsorted prefix
        mi = arr.index(max(arr[0:cur]))
        # Flip so that the maximum comes to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip the first `cur` elements so the maximum lands in its final position
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
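# Worked example (a sketch; runs only when executed directly): each pass flips the current maximum to
# the front of the unsorted prefix, then flips that prefix so the maximum lands in its final slot.
if __name__ == "__main__":
    assert pancake_sort([3, 2, 4, 1]) == [1, 2, 3, 4]
    assert pancake_sort([]) == []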
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
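# Minimal usage sketch (assumptions: the routine above is exposed as `TFKMeansCluster`, its name in
# the original script, and a TensorFlow 1.x runtime is available, since the code relies on tf.Session,
# placeholders and tf.initialize_all_variables):
if __name__ == "__main__":
    import numpy as np

    points = np.random.rand(50, 2)  # 50 two-dimensional vectors
    centroids, assignments = TFKMeansCluster(points, 3)
    print(centroids)    # one coordinate per cluster
    print(assignments)  # cluster index assigned to each input vector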
| 663 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Tuple = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class __A ( __UpperCAmelCase ):
_UpperCamelCase : Optional[int] = "instructblip_vision_model"
def __init__( self , a__=1408 , a__=6144 , a__=39 , a__=16 , a__=224 , a__=14 , a__="gelu" , a__=1e-6 , a__=0.0 , a__=1e-10 , a__=True , **a__ , ):
super().__init__(**lowerCAmelCase_ )
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Dict = intermediate_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : str = patch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : List[str] = attention_dropout
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Any = qkv_bias
@classmethod
def __A ( cls , a__ , **a__ ):
cls._set_token_in_kwargs(lowerCAmelCase_ )
_lowerCAmelCase , _lowerCAmelCase : Dict = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowerCAmelCase : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class __A ( __UpperCAmelCase ):
_UpperCamelCase : Optional[int] = "instructblip_qformer"
def __init__( self , a__=30522 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.0_2 , a__=1e-12 , a__=0 , a__="absolute" , a__=2 , a__=1408 , **a__ , ):
super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
_lowerCAmelCase : List[str] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : Optional[int] = position_embedding_type
_lowerCAmelCase : List[str] = cross_attention_frequency
_lowerCAmelCase : Union[str, Any] = encoder_hidden_size
@classmethod
def __A ( cls , a__ , **a__ ):
cls._set_token_in_kwargs(lowerCAmelCase_ )
_lowerCAmelCase , _lowerCAmelCase : Any = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowerCAmelCase : Optional[int] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(lowerCAmelCase_ , **lowerCAmelCase_ )
class __A ( __UpperCAmelCase ):
_UpperCamelCase : List[Any] = "instructblip"
_UpperCamelCase : Optional[Any] = True
def __init__( self , a__=None , a__=None , a__=None , a__=32 , **a__ ):
super().__init__(**lowerCAmelCase_ )
if vision_config is None:
_lowerCAmelCase : Optional[Any] = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowerCAmelCase : Dict = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowerCAmelCase : List[str] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowerCAmelCase : Union[str, Any] = InstructBlipVisionConfig(**lowerCAmelCase_ )
_lowerCAmelCase : List[str] = InstructBlipQFormerConfig(**lowerCAmelCase_ )
_lowerCAmelCase : Tuple = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowerCAmelCase : Optional[int] = CONFIG_MAPPING[text_model_type](**lowerCAmelCase_ )
_lowerCAmelCase : str = self.text_config.tie_word_embeddings
_lowerCAmelCase : Dict = self.text_config.is_encoder_decoder
_lowerCAmelCase : Dict = num_query_tokens
_lowerCAmelCase : Any = self.vision_config.hidden_size
_lowerCAmelCase : Union[str, Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase : Optional[int] = 1.0
_lowerCAmelCase : Dict = 0.0_2
@classmethod
def __A ( cls , a__ , a__ , a__ , **a__ , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase_ , )
def __A ( self ):
_lowerCAmelCase : List[str] = copy.deepcopy(self.__dict__ )
_lowerCAmelCase : List[Any] = self.vision_config.to_dict()
_lowerCAmelCase : int = self.qformer_config.to_dict()
_lowerCAmelCase : List[str] = self.text_config.to_dict()
_lowerCAmelCase : Optional[int] = self.__class__.model_type
return output
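# Composition sketch (assuming the composite class above is exported as `InstructBlipConfig` and the
# classmethod keeps its transformers name `from_vision_qformer_text_configs`): the sub-configs can be
# built separately and then combined.
if __name__ == "__main__":
    from transformers import OPTConfig

    vision = InstructBlipVisionConfig(hidden_size=1408)
    qformer = InstructBlipQFormerConfig(hidden_size=768)
    text = OPTConfig()
    config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text, num_query_tokens=32)
    print(config.to_dict().keys())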
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> List[Any]:
_lowerCAmelCase : Any = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
_lowerCAmelCase : int = True if '''large''' in model_name or '''huge''' in model_name else False
_lowerCAmelCase : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
_lowerCAmelCase : List[str] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
_lowerCAmelCase : int = [3, 3, 3, 3]
_lowerCAmelCase : str = [5, 5, 5, 5]
elif "fl4" in model_name:
_lowerCAmelCase : List[str] = [4, 4, 4, 4]
_lowerCAmelCase : Union[str, Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
_lowerCAmelCase : Any = [3, 3, 3, 3]
if "lrf" in model_name:
_lowerCAmelCase : Optional[Any] = [3, 3, 3, 3]
else:
_lowerCAmelCase : Any = [2, 2, 2, 2]
if "tiny" in model_name:
_lowerCAmelCase : Dict = 96
elif "small" in model_name:
_lowerCAmelCase : Any = 96
elif "base" in model_name:
_lowerCAmelCase : Union[str, Any] = 128
elif "large" in model_name:
_lowerCAmelCase : List[Any] = 192
elif "xlarge" in model_name:
_lowerCAmelCase : List[Any] = 256
elif "huge" in model_name:
_lowerCAmelCase : Optional[Any] = 352
# set label information
_lowerCAmelCase : Dict = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
_lowerCAmelCase : Optional[Any] = '''imagenet-22k-id2label.json'''
else:
_lowerCAmelCase : Union[str, Any] = '''imagenet-1k-id2label.json'''
_lowerCAmelCase : Tuple = json.load(open(hf_hub_download(a_ ,a_ ,repo_type="""dataset""" ) ,"""r""" ) )
_lowerCAmelCase : Dict = {int(a_ ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : Any = FocalNetConfig(
embed_dim=a_ ,depths=a_ ,focal_levels=a_ ,focal_windows=a_ ,use_conv_embed=a_ ,idalabel=a_ ,labelaid=a_ ,use_post_layernorm=a_ ,use_layerscale=a_ ,)
return config
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> int:
if "patch_embed.proj" in name:
_lowerCAmelCase : Dict = name.replace("""patch_embed.proj""" ,"""embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_lowerCAmelCase : Dict = name.replace("""patch_embed.norm""" ,"""embeddings.norm""" )
if "layers" in name:
_lowerCAmelCase : Any = '''encoder.''' + name
if "encoder.layers" in name:
_lowerCAmelCase : Any = name.replace("""encoder.layers""" ,"""encoder.stages""" )
if "downsample.proj" in name:
_lowerCAmelCase : Optional[int] = name.replace("""downsample.proj""" ,"""downsample.projection""" )
if "blocks" in name:
_lowerCAmelCase : int = name.replace("""blocks""" ,"""layers""" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_lowerCAmelCase : int = name.replace("""modulation.f""" ,"""modulation.projection_in""" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("""modulation.h""" ,"""modulation.projection_context""" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_lowerCAmelCase : Tuple = name.replace("""modulation.proj""" ,"""modulation.projection_out""" )
if name == "norm.weight":
_lowerCAmelCase : Tuple = '''layernorm.weight'''
if name == "norm.bias":
_lowerCAmelCase : List[Any] = '''layernorm.bias'''
if "head" in name:
_lowerCAmelCase : List[Any] = name.replace("""head""" ,"""classifier""" )
else:
_lowerCAmelCase : Tuple = '''focalnet.''' + name
return name
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Union[str, Any]=False ) -> Union[str, Any]:
# fmt: off
_lowerCAmelCase : Optional[Any] = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
_lowerCAmelCase : Optional[int] = model_name_to_url[model_name]
print("""Checkpoint URL: """ ,a_ )
_lowerCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(a_ ,map_location="""cpu""" )['''model''']
# rename keys
for key in state_dict.copy().keys():
_lowerCAmelCase : Union[str, Any] = state_dict.pop(a_ )
_lowerCAmelCase : Union[str, Any] = val
_lowerCAmelCase : Optional[int] = get_focalnet_config(a_ )
_lowerCAmelCase : Union[str, Any] = FocalNetForImageClassification(a_ )
model.eval()
# load state dict
model.load_state_dict(a_ )
# verify conversion
_lowerCAmelCase : Dict = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCAmelCase : Tuple = BitImageProcessor(
do_resize=a_ ,size={"""shortest_edge""": 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=a_ ,crop_size=224 ,do_normalize=a_ ,image_mean=a_ ,image_std=a_ ,)
_lowerCAmelCase : Dict = Image.open(requests.get(a_ ,stream=a_ ).raw )
_lowerCAmelCase : Union[str, Any] = processor(images=a_ ,return_tensors="""pt""" )
_lowerCAmelCase : Dict = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] ,std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_lowerCAmelCase : List[str] = image_transforms(a_ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,a_ ,atol=1e-4 )
_lowerCAmelCase : Optional[Any] = model(**a_ )
_lowerCAmelCase : int = outputs.logits.argmax(-1 ).item()
print("""Predicted class:""" ,model.config.idalabel[predicted_class_idx] )
print("""First values of logits:""" ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_lowerCAmelCase : Optional[Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_lowerCAmelCase : Dict = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_lowerCAmelCase : Any = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_lowerCAmelCase : List[str] = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_lowerCAmelCase : Any = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_lowerCAmelCase : Dict = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] ,a_ ,atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(a_ )
processor.save_pretrained(a_ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
_a : List[str] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
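# Example invocation (a sketch; the script file name is hypothetical):
#   python convert_focalnet_to_pytorch.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub
# Only the checkpoints listed in `model_name_to_url` above are supported.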
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : Tuple = None ,) -> Any:
_lowerCAmelCase : str = {}
if train_file is not None:
_lowerCAmelCase : str = [train_file]
if eval_file is not None:
_lowerCAmelCase : List[str] = [eval_file]
if test_file is not None:
_lowerCAmelCase : Optional[Any] = [test_file]
_lowerCAmelCase : List[Any] = datasets.load_dataset("""csv""" ,data_files=UpperCAmelCase__ )
_lowerCAmelCase : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
_lowerCAmelCase : str = features_name.pop(UpperCAmelCase__ )
_lowerCAmelCase : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) )
_lowerCAmelCase : List[str] = {label: i for i, label in enumerate(UpperCAmelCase__ )}
_lowerCAmelCase : Dict = tokenizer.model_input_names
_lowerCAmelCase : List[Any] = {}
if len(UpperCAmelCase__ ) == 1:
for k in files.keys():
_lowerCAmelCase : List[str] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] ,truncation=UpperCAmelCase__ ,max_length=UpperCAmelCase__ ,padding="""max_length""" ) ,batched=UpperCAmelCase__ ,)
elif len(UpperCAmelCase__ ) == 2:
for k in files.keys():
_lowerCAmelCase : Optional[int] = ds[k].map(
lambda _lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) ,truncation=UpperCAmelCase__ ,max_length=UpperCAmelCase__ ,padding="""max_length""" ,) ,batched=UpperCAmelCase__ ,)
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_lowerCAmelCase : Dict = {k: v for k, v in ex.items() if k in input_names}
_lowerCAmelCase : Tuple = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_lowerCAmelCase : Optional[int] = {k: v for k, v in ex.items() if k in input_names}
_lowerCAmelCase : Optional[int] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_lowerCAmelCase : Optional[int] = {k: v for k, v in ex.items() if k in input_names}
_lowerCAmelCase : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
_lowerCAmelCase : Tuple = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_lowerCAmelCase : int = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_lowerCAmelCase : List[str] = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_lowerCAmelCase : Tuple = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_lowerCAmelCase : Union[str, Any] = (
tf.data.Dataset.from_generator(
UpperCAmelCase__ ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_lowerCAmelCase : Optional[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_a : int = logging.getLogger(__name__)
@dataclass
class __A :
_UpperCamelCase : Tuple = field(metadata={"help": "Which column contains the label"} )
_UpperCamelCase : Optional[Any] = field(default=_a , metadata={"help": "The path of the training file"} )
_UpperCamelCase : str = field(default=_a , metadata={"help": "The path of the development file"} )
_UpperCamelCase : List[str] = field(default=_a , metadata={"help": "The path of the test file"} )
_UpperCamelCase : str = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase : str = field(
default=_a , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class __A :
_UpperCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase : Any = field(
default=_a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase : str = field(
default=_a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase : List[str] = field(default=_a , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCamelCase : List[Any] = field(
default=_a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = get_tfds(
train_file=data_args.train_file ,eval_file=data_args.dev_file ,test_file=data_args.test_file ,tokenizer=UpperCAmelCase__ ,label_column_id=data_args.label_column_id ,max_seq_length=data_args.max_seq_length ,)
_lowerCAmelCase : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=len(UpperCAmelCase__ ) ,labelaid=UpperCAmelCase__ ,idalabel={id: label for label, id in labelaid.items()} ,finetuning_task="""text-classification""" ,cache_dir=model_args.cache_dir ,)
with training_args.strategy.scope():
_lowerCAmelCase : Dict = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_pt=bool(""".bin""" in model_args.model_name_or_path ) ,config=UpperCAmelCase__ ,cache_dir=model_args.cache_dir ,)
def compute_metrics(_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : Optional[int] = np.argmax(p.predictions ,axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_lowerCAmelCase : List[str] = TFTrainer(
model=UpperCAmelCase__ ,args=UpperCAmelCase__ ,train_dataset=UpperCAmelCase__ ,eval_dataset=UpperCAmelCase__ ,compute_metrics=UpperCAmelCase__ ,)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCAmelCase : Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase : List[str] = trainer.evaluate()
_lowerCAmelCase : str = os.path.join(training_args.output_dir ,"""eval_results.txt""" )
with open(UpperCAmelCase__ ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(UpperCAmelCase__ )
return results
if __name__ == "__main__":
main()
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
_a : List[Any] = list[tuple[int, int]]
_a : str = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __A :
def __init__( self , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = pos_x
_lowerCAmelCase : int = pos_y
_lowerCAmelCase : Tuple = (pos_y, pos_x)
_lowerCAmelCase : Union[str, Any] = goal_x
_lowerCAmelCase : Optional[Any] = goal_y
_lowerCAmelCase : List[str] = parent
class __A :
def __init__( self , a__ , a__ ):
_lowerCAmelCase : int = Node(start[1] , start[0] , goal[1] , goal[0] , __UpperCamelCase )
_lowerCAmelCase : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , __UpperCamelCase )
_lowerCAmelCase : Dict = [self.start]
_lowerCAmelCase : Optional[Any] = False
def __A ( self ):
while self.node_queue:
_lowerCAmelCase : int = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_lowerCAmelCase : Optional[Any] = True
return self.retrace_path(__UpperCamelCase )
_lowerCAmelCase : Tuple = self.get_successors(__UpperCamelCase )
for node in successors:
self.node_queue.append(__UpperCamelCase )
if not self.reached:
return [self.start.pos]
return None
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = []
for action in delta:
_lowerCAmelCase : Union[str, Any] = parent.pos_x + action[1]
_lowerCAmelCase : Any = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__UpperCamelCase , __UpperCamelCase , self.target.pos_y , self.target.pos_x , __UpperCamelCase ) )
return successors
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = node
_lowerCAmelCase : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase : Any = current_node.parent
path.reverse()
return path
class __A :
def __init__( self , a__ , a__ ):
_lowerCAmelCase : Any = BreadthFirstSearch(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase : Optional[int] = BreadthFirstSearch(__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase : Any = False
def __A ( self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_lowerCAmelCase : List[Any] = self.fwd_bfs.node_queue.pop(0 )
_lowerCAmelCase : str = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_lowerCAmelCase : List[Any] = True
return self.retrace_bidirectional_path(
__UpperCamelCase , __UpperCamelCase )
_lowerCAmelCase : Optional[Any] = current_bwd_node
_lowerCAmelCase : List[Any] = current_fwd_node
_lowerCAmelCase : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(__UpperCamelCase ),
self.bwd_bfs: self.bwd_bfs.get_successors(__UpperCamelCase ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__UpperCamelCase )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def __A ( self , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.fwd_bfs.retrace_path(__UpperCamelCase )
_lowerCAmelCase : List[str] = self.bwd_bfs.retrace_path(__UpperCamelCase )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
_a : Tuple = (0, 0)
_a : Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a : int = time.time()
_a : List[str] = BreadthFirstSearch(init, goal)
_a : Dict = bfs.search()
_a : List[Any] = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
_a : Tuple = time.time()
_a : Union[str, Any] = BidirectionalBreadthFirstSearch(init, goal)
_a : int = bd_bfs.search()
_a : int = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : int = WavaVecaPhonemeCTCTokenizer
_UpperCamelCase : Tuple = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Tuple = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
_lowerCAmelCase : Optional[Any] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : Tuple = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
_lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
def __A ( self , a__ , a__=False , a__=20 , a__=5 ):
_lowerCAmelCase : List[Any] = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )) for i in range(len(a__ ) )]
_lowerCAmelCase : Union[str, Any] = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
_lowerCAmelCase : Tuple = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
_lowerCAmelCase : List[Any] = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase : List[str] = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase : Tuple = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
_lowerCAmelCase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
_lowerCAmelCase : Union[str, Any] = """ """ + output_txt
_lowerCAmelCase : str = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
_lowerCAmelCase : Union[str, Any] = tokenizer("""m xxx ɪ""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
_lowerCAmelCase : str = tokenizer("""m aaa ɪ ccc""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
_lowerCAmelCase : int = tokenizer("""maɪ c""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [3, 200] ) # mai should be <unk> (=3)
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Union[str, Any] = """Hello how are you"""
_lowerCAmelCase : int = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(a__ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Dict = """Hello how are you"""
_lowerCAmelCase : int = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a__ ).input_ids , tokenizer(a__ , do_phonemize=a__ ).input_ids )
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Dict = """Hello how are you"""
_lowerCAmelCase : Union[str, Any] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : Dict = tokenizer.decode(tokenizer(a__ ).input_ids )
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_lowerCAmelCase : Tuple = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : Tuple = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def __A ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : Union[str, Any] = """Hello how are you"""
_lowerCAmelCase : Optional[Any] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(a__ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : int = """Hello how are you"""
_lowerCAmelCase : List[Any] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a__ ).input_ids , tokenizer(a__ , do_phonemize=a__ ).input_ids )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_lowerCAmelCase : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_lowerCAmelCase : Optional[Any] = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(a__ , filter_word_delimiter_token=a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : Union[str, Any] = """Hello how are you"""
_lowerCAmelCase : Optional[int] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : Dict = tokenizer.decode(tokenizer(a__ ).input_ids , filter_word_delimiter_token=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : int = """Hello how are you"""
_lowerCAmelCase : Dict = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : List[Any] = tokenizer.decode(tokenizer(a__ ).input_ids , filter_word_delimiter_token=a__ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=a__ )
_lowerCAmelCase : List[Any] = """Hello how are you"""
_lowerCAmelCase : Optional[int] = tokenizer(a__ , phonemizer_lang="""en-us""" ).input_ids
_lowerCAmelCase : int = tokenizer(a__ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(a__ , a__ )
_lowerCAmelCase : Optional[int] = tokenizer.decode(a__ )
_lowerCAmelCase : str = tokenizer.decode(a__ )
self.assertEqual(a__ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(a__ , """ɛ l o h aʊ a ʁ j u""" )
def __A ( self ):
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Dict = """Hello how Are you"""
_lowerCAmelCase : Any = """hello how are you"""
_lowerCAmelCase : Any = tokenizer(a__ ).input_ids
_lowerCAmelCase : Tuple = tokenizer(a__ ).input_ids
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_lowerCAmelCase : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
_lowerCAmelCase : Tuple = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def __A ( a__ , a__ ):
_lowerCAmelCase : int = [d[key] for d in offsets]
return retrieved_list
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_lowerCAmelCase : List[Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_lowerCAmelCase : Tuple = tokenizer.decode(a__ , output_char_offsets=a__ , filter_word_delimiter_token=a__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(a__ , a__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __A ( self ):
_lowerCAmelCase : Any = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(a__ , a__ ):
self.assertTrue(isinstance(a__ , a__ ) )
self.assertTrue(isinstance(outputs_list[0] , a__ ) )
# transform list to ModelOutput
_lowerCAmelCase : str = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(a__ , a__ ):
if isinstance(a__ , a__ ):
[recursive_check(a__ , a__ ) for la, la in zip(a__ , a__ )]
self.assertEqual(a__ , a__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
_lowerCAmelCase : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_lowerCAmelCase : str = tokenizer.batch_decode(a__ , output_char_offsets=a__ )
_lowerCAmelCase : Union[str, Any] = [tokenizer.decode(a__ , output_char_offsets=a__ ) for ids in sample_ids]
check_list_tuples_equal(a__ , a__ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def __A ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def __A ( self ):
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def __A ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Union[str, Any] = tokenizer.vocab_size
_lowerCAmelCase : Optional[int] = len(a__ )
self.assertNotEqual(a__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCAmelCase : List[Any] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_lowerCAmelCase : Any = tokenizer.add_tokens(a__ )
_lowerCAmelCase : int = tokenizer.vocab_size
_lowerCAmelCase : Optional[int] = len(a__ )
self.assertNotEqual(a__ , 0 )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , len(a__ ) )
self.assertEqual(a__ , all_size + len(a__ ) )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=a__ )
self.assertGreaterEqual(len(a__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_lowerCAmelCase : List[str] = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_lowerCAmelCase : str = tokenizer.add_special_tokens(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.vocab_size
_lowerCAmelCase : List[str] = len(a__ )
self.assertNotEqual(a__ , 0 )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , len(a__ ) )
self.assertEqual(a__ , all_size_a + len(a__ ) )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=a__ )
self.assertGreaterEqual(len(a__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.""" )
def __A ( self ):
pass
@unittest.skip("""The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Any = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Any = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(output["""text"""] , a__ )
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
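# Example (illustrative): 28 is perfect because its proper divisors 1, 2, 4, 7 and 14
# sum to 28, whereas the proper divisors of 27 (1, 3 and 9) only sum to 13.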
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
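# Note (illustrative, not part of the original script): the loop above is plain batch
# gradient descent on the cross-entropy loss. For h = sigmoid(x @ theta) the gradient of
# the mean loss is grad J(theta) = x.T @ (h - y) / m, which is exactly
# `np.dot(x.T, h - y) / y.size` with m = y.size samples.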
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
print('theta: ', theta) # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("""utf-8""")
    status = json.loads(o)
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""", """w""") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(""",""" )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def __A ( self ):
_lowerCAmelCase : List[str] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def __A ( self ):
_lowerCAmelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(a__ ) )
def __A ( self ):
_lowerCAmelCase : str = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(a__ ) )
def __A ( self ):
_lowerCAmelCase : List[str] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(a__ ) )
def __A ( self ):
_lowerCAmelCase : Optional[int] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCAmelCase : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __A ( self ):
_lowerCAmelCase : List[str] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCAmelCase : Dict = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __A ( self ):
# pass variant but use the non-variant filenames
_lowerCAmelCase : List[str] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCAmelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __A ( self ):
_lowerCAmelCase : Dict = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCAmelCase : List[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(a__ , variant=a__ ) )
def __A ( self ):
_lowerCAmelCase : int = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCAmelCase : Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __A ( self ):
# pass variant but use the non-variant filenames
_lowerCAmelCase : Optional[int] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCAmelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(a__ , variant=a__ ) )
def __A ( self ):
_lowerCAmelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCAmelCase : str = '''fp16'''
self.assertFalse(is_safetensors_compatible(a__ , variant=a__ ) )
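# Illustrative only: a minimal sketch of the rule the assertions above exercise, NOT the
# actual diffusers helper. The idea being tested is that every PyTorch ".bin" weight needs
# a ".safetensors" counterpart in the same folder, where transformers-style checkpoints
# named "pytorch_model*" map to "model*". The `variant` argument is kept only to mirror
# the call sites; this simplified check does not need it for the cases above.
def _sketch_is_safetensors_compatible(filenames, variant=None):
    safetensors_files = {f for f in filenames if f.endswith(".safetensors")}
    for filename in filenames:
        if not filename.endswith(".bin"):
            continue
        stem = filename[: -len(".bin")]
        # either the same stem (diffusers checkpoints) or the "model" stem (transformers checkpoints)
        candidates = {stem, stem.replace("pytorch_model", "model", 1)}
        if not any(c + ".safetensors" in safetensors_files for c in candidates):
            return False
    return True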
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = """//div[@class = \"maincounter-number\"]/span/text()"""
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
_a : List[str] = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 720 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
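# Illustrative sanity check (not part of the original solution): the sieve above can be
# validated against a brute-force totient for a small limit, e.g. phi(2..8) = 1, 2, 2, 4, 2, 6, 4
# sums to 21, so `solution(8)` should return 21.
def _brute_force_totient_sum(limit: int) -> int:
    from math import gcd
    return sum(sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, limit + 1))
# assert solution(8) == _brute_force_totient_sum(8) == 21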
if __name__ == "__main__":
print(solution())
| 663 | 0 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
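# Illustrative check (not part of the original script): the sieve output for a small bound
# can be validated against naive trial division.
def _is_prime_naive(n: int) -> bool:
    return n > 1 and all(n % d != 0 for d in range(2, int(n**0.5) + 1))
# e.g. prime_sieve_eratosthenes(30) == [n for n in range(2, 31) if _is_prime_naive(n)]
#      == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]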
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : Optional[Any] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
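# Worked example (illustrative): with level = 170 the factor is
#   (259 * (170 + 255)) / (255 * (259 - 170)) = 110075 / 22695, roughly 4.85,
# so mid-grey is left alone (contrast(128) == 128) while a brighter pixel such as
# c = 150 is pushed up to int(128 + 4.85 * 22) == 234; that stretch around 128 is
# what produces the "high contrast" output saved below.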
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_a : Optional[Any] = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
_a : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 701 |
"""simple docstring"""
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError("""Maximum queue size is 100""" )
            self.queues[priority].append(data )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
    def dequeue(self):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    def __init__(self):
        self.queue = []
    def enqueue(self, data: int) -> None:
        if len(self.queue ) == 100:
            raise OverFlowError("""Maximum queue size is 100""" )
        self.queue.append(data )
    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("""The queue is empty""" )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
def __str__( self ):
return str(self.queue )
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=400 , a__=True , a__=None , a__=True , ):
_lowerCAmelCase : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Union[str, Any] = min_resolution
_lowerCAmelCase : int = max_resolution
_lowerCAmelCase : str = do_resize
_lowerCAmelCase : Optional[Any] = size
_lowerCAmelCase : Union[str, Any] = apply_ocr
def __A ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = LayoutLMvaImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_resize""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
self.assertTrue(hasattr(a__ , """apply_ocr""" ) )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , a__ )
self.assertIsInstance(encoding.boxes , a__ )
# Test batched
_lowerCAmelCase : Optional[int] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCAmelCase : Tuple = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_lowerCAmelCase : int = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def __A ( self ):
        # with apply_ocr = True
_lowerCAmelCase : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
_lowerCAmelCase : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
_lowerCAmelCase : Tuple = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_lowerCAmelCase : Optional[int] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_lowerCAmelCase : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a__ )
self.assertListEqual(encoding.boxes , a__ )
        # with apply_ocr = False
_lowerCAmelCase : Union[str, Any] = LayoutLMvaImageProcessor(apply_ocr=a__ )
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
        # test with an audio array taken from a dummy dataset
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __A :
def __init__( self , a__ = 6 ):
_lowerCAmelCase : Node | None = None
_lowerCAmelCase : Node | None = None
self.create_linked_list(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = Node()
_lowerCAmelCase : Any = current_node
_lowerCAmelCase : Optional[Any] = current_node
_lowerCAmelCase : Any = current_node
for _ in range(1 , a__ ):
_lowerCAmelCase : Dict = Node()
_lowerCAmelCase : Union[str, Any] = current_node
_lowerCAmelCase : Optional[Any] = previous_node
_lowerCAmelCase : Optional[Any] = current_node
_lowerCAmelCase : List[str] = self.front
_lowerCAmelCase : Dict = previous_node
def __A ( self ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def __A ( self ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def __A ( self , a__ ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
_lowerCAmelCase : Dict = self.rear.next
if self.rear:
_lowerCAmelCase : int = data
def __A ( self ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
_lowerCAmelCase : Tuple = self.front.data
_lowerCAmelCase : Tuple = None
return data
_lowerCAmelCase : Optional[int] = self.front
_lowerCAmelCase : List[Any] = old_front.next
_lowerCAmelCase : int = old_front.data
_lowerCAmelCase : Dict = None
return data
def __A ( self ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def __A ( self ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class __A :
def __init__( self ):
_lowerCAmelCase : Any | None = None
_lowerCAmelCase : Node | None = None
_lowerCAmelCase : Node | None = None
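# Behaviour sketch (descriptive comment added for clarity; it paraphrases the intent of the code
# above rather than adding functionality): the first class pre-allocates a fixed ring of linked
# nodes, enqueue is meant to advance the rear pointer and store the new value there, dequeue is
# meant to return and clear the data at the front pointer, and the two check_* helpers raise the
# "Empty Queue" / "Full Queue" exceptions when those operations are not possible.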
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
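# Illustrative note (not part of the original pipeline file): with the default scale factor of 8,
# a 768x768 request gives 768 // 8**2 == 12 with no remainder, so the helper above returns
# (12 * 8, 12 * 8) == (96, 96); a dimension that is not a multiple of 64 is rounded up to the
# next latent row or column before being re-expanded by the scale factor.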
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
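# Illustrative note (not part of the original pipeline file): the arithmetic above maps 8-bit RGB
# values from [0, 255] into [-1, 1] (0 -> -1.0, 255 -> 1.0), which is the value range the image
# encoder used later in this pipeline is assumed to expect.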
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
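    # Illustrative note (assumed numbers, not from the original file): if the scheduler was set up
    # with num_inference_steps=100 and strength=0.3, then init_timestep = min(int(100 * 0.3), 100) = 30
    # and t_start = 70, so the method returns the final 30 scheduler timesteps together with the count 30.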
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
            # 'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
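# Hedged usage sketch (not part of the original script): the function above expects an indexable
# collection of equal-length vectors plus a cluster count, and it relies on a TensorFlow 1.x-era
# API (tf.Session, tf.placeholder, tf.sub, tf.initialize_all_variables). A call would look like
#
#     sample_vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 9.5]])
#     centroids, assignments = SCREAMING_SNAKE_CASE(sample_vectors, 2)
#
# where the returned pair corresponds to the fitted cluster centres and the per-vector cluster
# indices, as noted in the closing comment of the function.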
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str]=1024 ,_lowerCamelCase : Union[str, Any]=1024 ,_lowerCamelCase : str=False ,**_lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : List[Any] = SeqaSeqDataset(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,type_path="""train""" ,**_lowerCamelCase )
_lowerCAmelCase : List[Any] = tok.pad_token_id
def get_lens(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Any = tqdm(
DataLoader(_lowerCamelCase ,batch_size=512 ,num_workers=8 ,shuffle=_lowerCamelCase ,collate_fn=ds.collate_fn ) ,desc=str(ds.len_file ) ,)
_lowerCAmelCase : Dict = []
for batch in dl:
_lowerCAmelCase : List[str] = batch["""input_ids"""].ne(_lowerCamelCase ).sum(1 ).tolist()
_lowerCAmelCase : Optional[Any] = batch["""labels"""].ne(_lowerCamelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(_lowerCamelCase ,_lowerCamelCase ):
max_lens.append(max(_lowerCamelCase ,_lowerCamelCase ) )
else:
max_lens.extend(_lowerCamelCase )
return max_lens
_lowerCAmelCase : Optional[Any] = get_lens(_lowerCamelCase )
_lowerCAmelCase : int = SeqaSeqDataset(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,type_path="""val""" ,**_lowerCamelCase )
_lowerCAmelCase : int = get_lens(_lowerCamelCase )
pickle_save(_lowerCamelCase ,train_ds.len_file )
pickle_save(_lowerCamelCase ,val_ds.len_file )
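# Illustrative note (not part of the original script): the two pickle files written above hold the
# per-example token lengths for the train and validation datasets; in the seq2seq example scripts
# this kind of length file is typically consumed by a length-aware ("sortish") batch sampler so
# that similarly sized examples are batched together and padding is reduced.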
if __name__ == "__main__":
fire.Fire(save_len_file)
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __A ( SCREAMING_SNAKE_CASE_ ):
@require_torch
def __A ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : int = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase : str = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase : int = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_lowerCAmelCase : Optional[Any] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(a__ )
BertModel.from_pretrained(a__ )
BertTokenizer.from_pretrained(a__ )
pipeline(task="""fill-mask""" , model=a__ )
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : Any = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
_lowerCAmelCase : Union[str, Any] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Dict = """1"""
_lowerCAmelCase : Optional[int] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __A ( self ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : Dict = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase : List[str] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_lowerCAmelCase : Dict = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(a__ )
BertModel.from_pretrained(a__ )
BertTokenizer.from_pretrained(a__ )
pipeline(task="""fill-mask""" , model=a__ )
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : int = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
_lowerCAmelCase : List[Any] = self.get_env()
_lowerCAmelCase : List[str] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __A ( self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : str = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
_lowerCAmelCase : int = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
_lowerCAmelCase : Union[str, Any] = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : int = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
_lowerCAmelCase : Any = self.get_env()
_lowerCAmelCase : int = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
_lowerCAmelCase : Optional[int] = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Any = """1"""
_lowerCAmelCase : List[Any] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __A ( self ):
_lowerCAmelCase : Optional[Any] = """
from transformers import pipeline
"""
_lowerCAmelCase : List[Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
_lowerCAmelCase : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
_lowerCAmelCase : List[Any] = self.get_env()
_lowerCAmelCase : Union[str, Any] = """1"""
_lowerCAmelCase : Tuple = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
_lowerCAmelCase : Tuple = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def __A ( self ):
_lowerCAmelCase : List[str] = """
from transformers import AutoModel
"""
_lowerCAmelCase : Optional[Any] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
_lowerCAmelCase : Tuple = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
_lowerCAmelCase : Dict = self.get_env()
_lowerCAmelCase : Tuple = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Dict = """1"""
_lowerCAmelCase : List[Any] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Build a small sample tree: 1 at the root, 2 and 3 as its children, 4 and 5 under 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
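# A compact, self-contained sketch of the zigzag idea used above: collect the
# nodes one level at a time with a plain list-based frontier and reverse every
# other level. The helper name and the sample call are illustrative additions,
# not part of the original module.
def _zigzag_sketch(root: Node | None) -> list:
    levels, current = [], [root] if root else []
    while current:
        levels.append([n.data for n in current])
        current = [child for n in current for child in (n.left, n.right) if child]
    return [vals if depth % 2 == 0 else list(reversed(vals)) for depth, vals in enumerate(levels)]
# Example: _zigzag_sketch(Node(1, Node(2, Node(4), Node(5)), Node(3))) -> [[1], [3, 2], [4, 5]]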
| 708 |
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
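# A hedged quick check of the divisor-sum definition above; the sample values
# below are illustrative additions, not part of the original script.
def _demo_perfect_numbers() -> None:
    known_perfect = [6, 28, 496, 8128]   # 6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14, ...
    known_not_perfect = [12, 27, 100]    # abundant or deficient numbers
    assert all(perfect(n) for n in known_perfect)
    assert not any(perfect(n) for n in known_not_perfect)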
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
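# A minimal brute-force cross-check of the counting above, intended for small
# limits only. It assumes the same definition as `solution`: a square lamina
# with outer width `outer` and hole width `hole` (same parity, hole >= 1) uses
# outer**2 - hole**2 tiles, and we count tile totals formed by between 1 and
# `n_limit` distinct laminae. The helper name is an illustrative addition.
def brute_force_laminae(t_limit: int = 1000, n_limit: int = 10) -> int:
    ways: defaultdict = defaultdict(int)  # reuses the import at the top of this file
    for outer in range(3, t_limit // 4 + 2):  # the thinnest lamina uses 4 * (outer - 1) tiles
        for hole in range(2 - outer % 2, outer - 1, 2):  # hole shares the outer width's parity
            tiles = outer * outer - hole * hole
            if tiles <= t_limit:
                ways[tiles] += 1
    return sum(1 for n in ways.values() if 1 <= n <= n_limit)
# For small limits this should agree with solution(t_limit, n_limit).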
if __name__ == "__main__":
print(F"""{solution() = }""")
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Build a small sample tree: 1 at the root, 2 and 3 as its children, 4 and 5 under 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Any = OpenAIGPTTokenizer
_UpperCamelCase : Tuple = OpenAIGPTTokenizerFast
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Tuple = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
_lowerCAmelCase : Union[str, Any] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : int = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(a__ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , a__ ):
return "lower newer", "lower newer"
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_lowerCAmelCase : Optional[int] = """lower"""
_lowerCAmelCase : Optional[Any] = ["""low""", """er</w>"""]
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = tokens + ["""<unk>"""]
_lowerCAmelCase : List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[Any] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Any = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 0 |
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
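# A small sanity check: Python's built-in three-argument pow performs the same
# modular exponentiation, so for exponents greater than 1 the two should agree
# (for exponent == 1 the helper above returns the base unreduced). The function
# name below is an illustrative addition.
def _modexpt_matches_builtin_pow(samples: int = 100) -> bool:
    import random
    rng = random.Random(0)
    for _ in range(samples):
        base = rng.randrange(2, 10**6)
        exponent = rng.randrange(2, 10**4)
        modulo_value = rng.randrange(2, 10**8)
        if _modexpt(base, exponent, modulo_value) != pow(base, exponent, modulo_value):
            return False
    return True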
if __name__ == "__main__":
print(F"""{solution() = }""")
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
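# The routine above targets the TensorFlow 1.x graph/session API (tf.Session,
# placeholders, initialize_all_variables) and will not run unmodified on
# TensorFlow 2.x. Below is a minimal NumPy sketch of the same Lloyd's-algorithm
# loop (nearest-centroid assignment followed by a centroid-mean update), added
# purely for comparison; the function name is an illustrative addition.
import numpy as np
def kmeans_numpy(vectors, noofclusters, noofiterations=100, seed=0):
    """Plain NumPy k-means clustering; returns (centroids, assignments)."""
    data = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(seed)
    # start from a random subset of the input points
    centroids = data[rng.choice(len(data), size=noofclusters, replace=False)]
    assignments = np.zeros(len(data), dtype=int)
    for _ in range(noofiterations):
        # Expectation step: assign every vector to its nearest centroid
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = distances.argmin(axis=1)
        # Maximization step: move each centroid to the mean of its cluster
        for k in range(noofclusters):
            members = data[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments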
| 663 | 0 |
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , a__="</s>" , a__="<unk>" , a__="<pad>" , a__=125 , a__=None , **a__ , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : Dict = [F"<extra_id_{i}>" for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : int = len(set(filter(lambda a__ : bool("""extra_id""" in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
_lowerCAmelCase : Any = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else pad_token
_lowerCAmelCase : Any = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else eos_token
_lowerCAmelCase : Union[str, Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else unk_token
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , **a__ , )
_lowerCAmelCase : int = extra_ids
_lowerCAmelCase : Optional[int] = 2**8 # utf is 8 bits
# define special tokens dict
_lowerCAmelCase : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
_lowerCAmelCase : Dict = len(self.special_tokens_encoder )
_lowerCAmelCase : Union[str, Any] = len(a__ )
for i, token in enumerate(a__ ):
_lowerCAmelCase : Any = self.vocab_size + i - n
_lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __A ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def __A ( self , a__ ):
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
_lowerCAmelCase : int = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = [chr(a__ ) for i in text.encode("""utf-8""" )]
return tokens
def __A ( self , a__ ):
if token in self.special_tokens_encoder:
_lowerCAmelCase : Tuple = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
_lowerCAmelCase : List[Any] = self.added_tokens_encoder[token]
elif len(a__ ) != 1:
_lowerCAmelCase : List[Any] = self.unk_token_id
else:
_lowerCAmelCase : List[Any] = ord(a__ ) + self._num_special_tokens
return token_id
def __A ( self , a__ ):
if index in self.special_tokens_decoder:
_lowerCAmelCase : Optional[int] = self.special_tokens_decoder[index]
else:
_lowerCAmelCase : Dict = chr(index - self._num_special_tokens )
return token
def __A ( self , a__ ):
_lowerCAmelCase : Dict = B""""""
for token in tokens:
if token in self.special_tokens_decoder:
_lowerCAmelCase : Tuple = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
_lowerCAmelCase : List[str] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
_lowerCAmelCase : Union[str, Any] = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
_lowerCAmelCase : Dict = token.encode("""utf-8""" )
else:
_lowerCAmelCase : Optional[int] = bytes([ord(a__ )] )
bstring += tok_string
_lowerCAmelCase : Any = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def __A ( self , a__ , a__ = None ):
return ()
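# The class above implements ByT5-style byte-level tokenization: ids 0, 1 and 2
# are reserved for the pad/eos/unk tokens and every UTF-8 byte is shifted up by
# that special-token count. A tiny standalone sketch of the same mapping, kept
# independent of the tokenizer machinery (the offset of 3 is taken from the
# special_tokens_encoder above; the helper names are illustrative additions):
def _bytes_to_ids(text: str, num_special_tokens: int = 3) -> list:
    # each UTF-8 byte becomes (byte value + offset), mirroring _convert_token_to_id
    return [b + num_special_tokens for b in text.encode("utf-8")]
def _ids_to_text(ids, num_special_tokens: int = 3) -> str:
    # drop ids that fall inside the reserved special-token range before decoding
    payload = bytes(i - num_special_tokens for i in ids if i >= num_special_tokens)
    return payload.decode("utf-8", errors="ignore")
# Round trip: _ids_to_text(_bytes_to_ids("hé")) == "hé"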
| 712 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
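# A short usage sketch of the helper above; the numeric values are illustrative.
def _coulomb_demo() -> None:
    # Solve for the force between two 1 C charges separated by 1 m.
    result = coulombs_law(force=0, charge1=1, charge2=1, distance=1)
    print(f"force ~ {result['force']:.3e} N")  # about 8.988e9 N
    # Solve for the separation that would produce that same force.
    back = coulombs_law(force=result["force"], charge1=1, charge2=1, distance=0)
    print(f"distance ~ {back['distance']:.2f} m")  # about 1.00 m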
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Any = GPTaTokenizer
_UpperCamelCase : Optional[int] = GPTaTokenizerFast
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : str = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Dict = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Any = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : List[str] = """lower newer"""
_lowerCAmelCase : Union[str, Any] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : List[str] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : List[str] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : Dict = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : int = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Optional[Any] = """This is a simple input"""
_lowerCAmelCase : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Tuple = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Any = """This is a simple input"""
_lowerCAmelCase : List[Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : List[str] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Union[str, Any] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[Any] = tokenizer.pad_token_id
_lowerCAmelCase : Tuple = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : List[str] = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : Tuple = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : str = """$$$"""
_lowerCAmelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : List[str] = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = tokenizer.bos_token_id
_lowerCAmelCase : Optional[int] = tokenizer(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __A ( self ):
pass
def __A ( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
_lowerCAmelCase : List[Any] = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )]
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : int = """Encode this."""
_lowerCAmelCase : Dict = """This one too please."""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ , add_special_tokens=a__ )
encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : List[Any] = tokenizer.encode_plus(
a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , )
_lowerCAmelCase : Tuple = encoded_sequence_dict["""input_ids"""]
_lowerCAmelCase : List[str] = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(a__ ) , len(a__ ) )
_lowerCAmelCase : Dict = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ )
]
_lowerCAmelCase : List[str] = [x for x in filtered_sequence if x is not None]
self.assertEqual(a__ , a__ )
@require_tokenizers
class __A ( unittest.TestCase ):
def __A ( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
_lowerCAmelCase : Tuple = """A photo of a cat"""
_lowerCAmelCase : List[str] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""test_opt""" )
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""./test_opt""" )
_lowerCAmelCase : List[Any] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
def __A ( self ):
_lowerCAmelCase : str = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=a__ )
_lowerCAmelCase : Any = """A photo of a cat"""
_lowerCAmelCase : Dict = tokenizer.encode(
a__ , )
# Same as above
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def __A ( self ):
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
_lowerCAmelCase : Dict = """bos"""
_lowerCAmelCase : str = tokenizer.get_vocab()["""bos"""]
_lowerCAmelCase : Any = """A photo of a cat"""
_lowerCAmelCase : List[Any] = tokenizer.encode(
a__ , )
# We changed the bos token
self.assertEqual(a__ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""./tok""" )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [31957, 250, 1345, 9, 10, 4758] )
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character, increment its count for the first string
    # and decrement it for the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
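# An equivalent formulation using collections.Counter, shown as an alternative
# to the defaultdict loop above (the helper name is an illustrative addition):
# two strings are anagrams exactly when their normalized character counts match.
def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    from collections import Counter
    first_str = first_str.lower().replace(" ", "")
    second_str = second_str.lower().replace(" ", "")
    return Counter(first_str) == Counter(second_str)
# check_anagrams_counter("Silent night", "Listen thing") is True, matching check_anagrams.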
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()
    status = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : list ,_lowerCamelCase : int ) -> int:
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
    # Compute the profit gained per unit weight (profit/weight) for each item.
_lowerCAmelCase : Union[str, Any] = [p / w for p, w in zip(_lowerCamelCase ,_lowerCamelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
_lowerCAmelCase : Union[str, Any] = sorted(_lowerCamelCase )
# declaring useful variables
_lowerCAmelCase : Dict = len(_lowerCamelCase )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : str = 0
_lowerCAmelCase : Dict = 0
    # loop while the accumulated weight has not exceeded max_weight and items remain (i < length)
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
_lowerCAmelCase : List[Any] = sorted_profit_by_weight[length - i - 1]
_lowerCAmelCase : Union[str, Any] = profit_by_weight.index(_lowerCamelCase )
_lowerCAmelCase : Dict = -1
        # check whether the whole item still fits into the remaining capacity
if max_weight - limit >= weight[index]:
limit += weight[index]
            # the whole item is taken (weight taken / item weight == 1),
            # so add its full profit
gain += 1 * profit[index]
else:
            # the item does not fit entirely: take only the remaining capacity
            # (max_weight - limit) and add the proportional share of its profit
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
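# A minimal, self-contained sketch of the same greedy fractional-knapsack idea used above.
# The helper name and the sample values in the trailing comment are illustrative only.
def _greedy_fractional_knapsack_sketch(profits: list, weights: list, capacity: float) -> float:
    # Sort items by profit density (profit per unit weight), highest first.
    items = sorted(zip(profits, weights), key=lambda pw: pw[0] / pw[1], reverse=True)
    gain = 0.0
    for p, w in items:
        if capacity >= w:
            # the whole item fits
            capacity -= w
            gain += p
        else:
            # take only the fraction that still fits, then stop
            gain += p * capacity / w
            break
    return gain
# _greedy_fractional_knapsack_sketch([60, 100, 120], [10, 20, 30], 50) -> 240.0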
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
_a : List[Any] = [int(x) for x in input('Input profits separated by spaces: ').split()]
_a : List[Any] = [int(x) for x in input('Input weights separated by spaces: ').split()]
_a : str = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 0 |
"""simple docstring"""
_a : dict[str, float] = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_02_17_66_34e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.35_58_18,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ,_lowerCamelCase : float ) -> float:
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
_lowerCAmelCase : Tuple = (
f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
f"Valid values are: {', '.join(_lowerCamelCase )}"
)
raise ValueError(_lowerCamelCase )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
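# A self-contained sketch of how the joule-based factor table above (bound to `_a`) is
# meant to be used: value * factor[from_type] / factor[to_type]. The helper name and the
# sample values are illustrative assumptions.
def _convert_energy_sketch(from_type: str, to_type: str, value: float) -> float:
    table = _a  # the conversion-factor table defined at the top of this module
    return value * table[from_type] / table[to_type]
# _convert_energy_sketch("kilowatthour", "joule", 1.0) -> 3_600_000.0
# _convert_energy_sketch("joule", "kilojoule", 500.0) -> 0.5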
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : int = IFInpaintingPipeline
_UpperCamelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
_UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def __A ( self ):
return self._get_dummy_components()
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Dict = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Optional[Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __A ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __A ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __A ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __A ( self ):
self._test_save_load_local()
def __A ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __A ( nn.Module ):
def __init__( self ):
super().__init__()
_lowerCAmelCase : Any = nn.Linear(3 , 4 )
_lowerCAmelCase : int = nn.BatchNormad(4 )
_lowerCAmelCase : List[str] = nn.Linear(4 , 5 )
def __A ( self , a__ ):
return self.lineara(self.batchnorm(self.lineara(a__ ) ) )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self , a__ , *a__ , **a__ ):
return (args[0] + 1,) + args[1:], kwargs
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self , a__ , a__ ):
return output + 1
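# Minimal sketch of how the hook classes above are attached and detached (this mirrors what
# the tests below exercise; the tensor shape is illustrative):
# model = ModelForTest()
# add_hook_to_module(model, PreForwardHook())    # adds 1 to the input before forward
# _ = model(torch.randn(2, 3))
# remove_hook_from_module(model)                 # restores the original forward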
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = ModelForTest()
_lowerCAmelCase : Optional[Any] = ModelHook()
add_hook_to_module(a__ , a__ )
self.assertEqual(test_model._hf_hook , a__ )
self.assertTrue(hasattr(a__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(a__ )
self.assertFalse(hasattr(a__ , """_hf_hook""" ) )
self.assertFalse(hasattr(a__ , """_old_forward""" ) )
def __A ( self ):
_lowerCAmelCase : Optional[int] = ModelForTest()
_lowerCAmelCase : int = ModelHook()
add_hook_to_module(a__ , a__ )
add_hook_to_module(a__ , a__ , append=a__ )
self.assertEqual(isinstance(test_model._hf_hook , a__ ) , a__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(a__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(a__ )
self.assertFalse(hasattr(a__ , """_hf_hook""" ) )
self.assertFalse(hasattr(a__ , """_old_forward""" ) )
def __A ( self ):
_lowerCAmelCase : Tuple = ModelForTest()
_lowerCAmelCase : Optional[int] = torch.randn(2 , 3 )
_lowerCAmelCase : Optional[Any] = test_model(x + 1 )
_lowerCAmelCase : Optional[int] = test_model(x + 2 )
_lowerCAmelCase : Tuple = PreForwardHook()
add_hook_to_module(a__ , a__ )
_lowerCAmelCase : List[str] = test_model(a__ )
self.assertTrue(torch.allclose(a__ , a__ , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCAmelCase : int = PreForwardHook()
add_hook_to_module(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = test_model(a__ )
self.assertTrue(torch.allclose(a__ , a__ , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCAmelCase : Tuple = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = test_model(a__ )
assert torch.allclose(a__ , a__ , atol=1e-5 )
def __A ( self ):
_lowerCAmelCase : List[str] = ModelForTest()
_lowerCAmelCase : int = torch.randn(2 , 3 )
_lowerCAmelCase : List[Any] = test_model(a__ )
_lowerCAmelCase : Any = PostForwardHook()
add_hook_to_module(a__ , a__ )
_lowerCAmelCase : Optional[int] = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_lowerCAmelCase : int = PostForwardHook()
add_hook_to_module(a__ , a__ )
_lowerCAmelCase : str = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_lowerCAmelCase : Optional[Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = test_model(a__ )
assert torch.allclose(a__ , output + 2 , atol=1e-5 )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = ModelForTest()
_lowerCAmelCase : Optional[Any] = torch.randn(2 , 3 )
_lowerCAmelCase : Any = test_model(a__ )
_lowerCAmelCase : Tuple = PostForwardHook()
add_hook_to_module(a__ , a__ )
_lowerCAmelCase : str = test_model(a__ )
self.assertTrue(torch.allclose(a__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Tuple = test_model(a__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def __A ( self ):
_lowerCAmelCase : Optional[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_lowerCAmelCase : List[Any] = torch.randn(2 , 3 )
_lowerCAmelCase : Optional[int] = model(a__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(a__ , AlignDevicesHook(io_same_device=a__ ) )
_lowerCAmelCase : Union[str, Any] = torch.randn(2 , 3 ).to(0 )
_lowerCAmelCase : List[Any] = model(a__ )
self.assertEqual(output.device , torch.device(0 ) )
def __A ( self ):
_lowerCAmelCase : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_lowerCAmelCase : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase : Optional[Any] = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_lowerCAmelCase : Dict = torch.randn(2 , 3 )
_lowerCAmelCase : Union[str, Any] = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
_lowerCAmelCase : Optional[int] = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**a__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**a__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_lowerCAmelCase : Union[str, Any] = torch.randn(2 , 3 )
_lowerCAmelCase : Any = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def __A ( self ):
_lowerCAmelCase : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_lowerCAmelCase : Tuple = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(a__ , execution_device=a__ , offload=a__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase : Dict = torch.device(a__ )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_lowerCAmelCase : Any = torch.randn(2 , 3 )
_lowerCAmelCase : Dict = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(a__ , execution_device=a__ , offload=a__ , offload_buffers=a__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_lowerCAmelCase : Union[str, Any] = torch.randn(2 , 3 )
_lowerCAmelCase : Union[str, Any] = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def __A ( self ):
_lowerCAmelCase : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
_lowerCAmelCase : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
a__ , execution_device=a__ , offload=a__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
_lowerCAmelCase : int = torch.device(a__ )
self.assertEqual(model.batchnorm.running_mean.device , a__ )
_lowerCAmelCase : Dict = torch.randn(2 , 3 )
_lowerCAmelCase : Any = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
a__ , execution_device=a__ , offload=a__ , weights_map=model.state_dict() , offload_buffers=a__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
_lowerCAmelCase : List[str] = torch.randn(2 , 3 )
_lowerCAmelCase : Any = model(a__ )
self.assertEqual(output.device , a__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(a__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 720 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : List[str] = [i - 1 for i in range(limit + 1 )]
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
            for j in range(2 * i ,limit + 1 ,i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
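# A small, self-contained sketch of the totient sieve the solution above relies on, run on
# a limit small enough to verify by hand. The helper name and the example are illustrative.
def _phi_sieve_sketch(limit: int) -> list:
    phi = list(range(limit + 1))  # phi[i] starts at i
    for i in range(2, limit + 1):
        if phi[i] == i:  # i is prime, so scale every multiple by (1 - 1/i)
            for j in range(i, limit + 1, i):
                phi[j] -= phi[j] // i
    return phi
# sum(_phi_sieve_sketch(8)[2:]) -> 21, the number of reduced proper fractions with d <= 8.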
if __name__ == "__main__":
print(solution())
| 663 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Tuple = ArgumentParser("""Accelerate CLI tool""" ,usage="""accelerate <command> [<args>]""" ,allow_abbrev=_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = parser.add_subparsers(help="""accelerate command helpers""" )
# Register commands
get_config_parser(subparsers=_lowerCamelCase )
env_command_parser(subparsers=_lowerCamelCase )
launch_command_parser(subparsers=_lowerCamelCase )
tpu_command_parser(subparsers=_lowerCamelCase )
test_command_parser(subparsers=_lowerCamelCase )
# Let's go
_lowerCAmelCase : Optional[Any] = parser.parse_args()
if not hasattr(_lowerCamelCase ,"""func""" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowerCamelCase )
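# Illustrative shell usage of the CLI assembled above; the script name and flag values are
# placeholders, not an exhaustive list:
#   accelerate config
#   accelerate env
#   accelerate launch train.py --num_processes 2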
if __name__ == "__main__":
main()
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_a : int = Lock()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ,_lowerCamelCase : List[str] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : int ,_lowerCamelCase : str ) -> Tuple:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 ,10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowerCamelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowerCAmelCase : List[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowerCAmelCase : Any = min(_lowerCamelCase ,_lowerCamelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowerCamelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowerCAmelCase : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowerCAmelCase : Dict = max(_lowerCamelCase ,_lowerCamelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Optional[int]:
_lowerCAmelCase : int = []
_lowerCAmelCase : List[str] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowerCAmelCase : Optional[Any] = Pipe()
_lowerCAmelCase : Optional[int] = Pipe()
process_array_.append(
Process(
target=_lowerCamelCase ,args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) ,) )
_lowerCAmelCase : List[Any] = temp_rs
_lowerCAmelCase : int = temp_rr
for i in range(1 ,len(_lowerCamelCase ) - 1 ):
_lowerCAmelCase : List[Any] = Pipe()
_lowerCAmelCase : Dict = Pipe()
process_array_.append(
Process(
target=_lowerCamelCase ,args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) ,) )
_lowerCAmelCase : List[Any] = temp_rs
_lowerCAmelCase : str = temp_rr
process_array_.append(
Process(
target=_lowerCamelCase ,args=(
len(_lowerCamelCase ) - 1,
arr[len(_lowerCamelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowerCamelCase ) - 1],
) ,) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 ,len(_lowerCamelCase ) ):
_lowerCAmelCase : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
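# A single-process sketch of the same odd-even transposition scheme, handy for checking
# expected output without spawning processes (the helper name is illustrative).
def _odd_even_transposition_sequential(arr: list) -> list:
    data = list(arr)
    n = len(data)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if data[i] > data[i + 1]:
                data[i], data[i + 1] = data[i + 1], data[i]
    return data
# _odd_even_transposition_sequential([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]) -> [1, 2, ..., 10]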
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : List[str] = list(range(10 ,0 ,-1 ) )
print("""Initial List""" )
print(*_lowerCamelCase )
_lowerCAmelCase : int = odd_even_transposition(_lowerCamelCase )
print("""Sorted List\n""" )
print(*_lowerCamelCase )
if __name__ == "__main__":
main()
| 700 |
"""simple docstring"""
from PIL import Image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ,_lowerCamelCase : int ) -> Image:
_lowerCAmelCase : Any = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(_lowerCamelCase )
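# The same point-wise contrast mapping as above, written out as a standalone sketch so the
# factor formula can be sanity-checked without an image file (values are illustrative).
def _contrast_value_sketch(pixel: int, level: int) -> int:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    return int(128 + factor * (pixel - 128))
# _contrast_value_sketch(128, 170) -> 128   (128 is a fixed point of the mapping)
# _contrast_value_sketch(p, 0)     -> p     (level 0 gives factor 1.0, i.e. no change)
# Results outside 0-255 are clipped when the mapping is applied to an 8-bit image via Image.point.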
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_a : Tuple = None
_a : List[str] = logging.get_logger(__name__)
_a : Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_a : Tuple = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
_a : Dict = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
_a : Optional[Any] = '▁'
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = ["input_ids", "attention_mask"]
_UpperCamelCase : List[str] = BarthezTokenizer
def __init__( self , a__=None , a__=None , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , **a__ , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Optional[int] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
super().__init__(
a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , **a__ , )
_lowerCAmelCase : Optional[Any] = vocab_file
_lowerCAmelCase : Dict = False if not self.vocab_file else True
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
_lowerCAmelCase : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = [self.sep_token_id]
_lowerCAmelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , a__ , a__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : List[str] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
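# Typical usage sketch, assuming the upstream BarthezTokenizerFast API that this class
# mirrors (the checkpoint name is taken from the pretrained map above):
# tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# tok("Un exemple de phrase.")   # input_ids wrapped as <s> ... </s> by the
#                                # special-token logic above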
| 701 |
"""simple docstring"""
class OverFlowError ( Exception ):
    pass
class UnderFlowError ( Exception ):
    pass
class __A :
def __init__( self ):
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[List[PIL.Image.Image], np.ndarray]
_UpperCamelCase : Optional[List[bool]]
_UpperCamelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : int = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = "pegasus"
_UpperCamelCase : int = ["past_key_values"]
_UpperCamelCase : Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , a__=50265 , a__=1024 , a__=12 , a__=4096 , a__=16 , a__=12 , a__=4096 , a__=16 , a__=0.0 , a__=0.0 , a__=True , a__=True , a__="gelu" , a__=1024 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=0 , a__=False , a__=0 , a__=1 , a__=1 , **a__ , ):
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Any = max_position_embeddings
_lowerCAmelCase : Dict = d_model
_lowerCAmelCase : List[str] = encoder_ffn_dim
_lowerCAmelCase : Any = encoder_layers
_lowerCAmelCase : Any = encoder_attention_heads
_lowerCAmelCase : Union[str, Any] = decoder_ffn_dim
_lowerCAmelCase : Union[str, Any] = decoder_layers
_lowerCAmelCase : List[Any] = decoder_attention_heads
_lowerCAmelCase : Tuple = dropout
_lowerCAmelCase : Dict = attention_dropout
_lowerCAmelCase : Any = activation_dropout
_lowerCAmelCase : Optional[Any] = activation_function
_lowerCAmelCase : Optional[int] = init_std
_lowerCAmelCase : Any = encoder_layerdrop
_lowerCAmelCase : Tuple = decoder_layerdrop
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : int = encoder_layers
_lowerCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , forced_eos_token_id=a__ , **a__ , )
@property
def __A ( self ):
return self.encoder_attention_heads
@property
def __A ( self ):
return self.d_model
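# Intended usage sketch, assuming the upstream PegasusConfig behaviour that this class mirrors:
# cfg = PegasusConfig()          # all defaults as listed in __init__ above
# cfg.hidden_size                # -> cfg.d_model (1024), resolved through the attribute_map
# cfg.num_attention_heads        # -> cfg.encoder_attention_heads (16)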
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
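# Quick numeric illustration of the helper above (referred to later in the file as
# `downscale_height_and_width`); the input sizes are arbitrary examples:
#   downscale_height_and_width(768, 768) -> (96, 96)   # 768 // 8**2 = 12, then * 8
#   downscale_height_and_width(300, 300) -> (40, 40)   # 300 // 64 rounds up to 5, then * 8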
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_a : Optional[Any] = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict=True ) -> List[str]:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
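# Each generated test case is a dict like the following (illustrative, built from the
# DATASETS_ON_HF_GCP entries above):
#   {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}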
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=SCREAMING_SNAKE_CASE_ ) )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = None
_UpperCamelCase : Dict = None
def __A ( self , a__ , a__ ):
with TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : int = dataset_module_factory(a__ , cache_dir=a__ )
_lowerCAmelCase : Union[str, Any] = import_main_class(dataset_module.module_path , dataset=a__ )
_lowerCAmelCase : DatasetBuilder = builder_cls(
cache_dir=a__ , config_name=a__ , hash=dataset_module.hash , )
_lowerCAmelCase : Any = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a__ ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
_lowerCAmelCase : Optional[Any] = cached_path(a__ , cache_dir=a__ )
self.assertTrue(os.path.exists(a__ ) )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> str:
'''simple docstring'''
_lowerCAmelCase : str = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
_lowerCAmelCase : List[str] = dataset_module_factory("""wikipedia""" ,cache_dir=_lowerCamelCase )
_lowerCAmelCase : Any = import_main_class(dataset_module.module_path )
_lowerCAmelCase : DatasetBuilder = builder_cls(
cache_dir=_lowerCamelCase ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
_lowerCAmelCase : Any = None
builder_instance.download_and_prepare()
_lowerCAmelCase : List[Any] = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dataset_module_factory("""wikipedia""" ,cache_dir=_lowerCamelCase )
_lowerCAmelCase : List[Any] = import_main_class(dataset_module.module_path ,dataset=_lowerCamelCase )
_lowerCAmelCase : DatasetBuilder = builder_cls(
cache_dir=_lowerCamelCase ,config_name="""20220301.frr""" ,hash=dataset_module.hash ,)
_lowerCAmelCase : List[str] = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_lowerCamelCase ,_lowerCamelCase )
assert "train" in ds
assert isinstance(ds["""train"""] ,_lowerCamelCase )
assert next(iter(ds["""train"""] ) )
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __A :
def __A ( self , a__ ):
raise NotImplementedError()
def __A ( self ):
raise NotImplementedError()
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = False , **a__ ):
_lowerCAmelCase : int = tokenizer
_lowerCAmelCase : List[Any] = skip_prompt
_lowerCAmelCase : int = decode_kwargs
# variables used in the streaming process
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = True
def __A ( self , a__ ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
_lowerCAmelCase : Dict = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_lowerCAmelCase : List[Any] = False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
_lowerCAmelCase : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
_lowerCAmelCase : int = text[self.print_len :]
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[Any] = 0
# If the last token is a CJK character, we print the characters.
elif len(a__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_lowerCAmelCase : List[str] = text[self.print_len :]
self.print_len += len(a__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_lowerCAmelCase : Optional[int] = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(a__ )
self.on_finalized_text(a__ )
def __A ( self ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_lowerCAmelCase : Optional[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_lowerCAmelCase : Union[str, Any] = text[self.print_len :]
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Tuple = 0
else:
_lowerCAmelCase : Optional[Any] = """"""
_lowerCAmelCase : Dict = True
self.on_finalized_text(a__ , stream_end=a__ )
def __A ( self , a__ , a__ = False ):
print(a__ , flush=a__ , end="""""" if not stream_end else None )
def __A ( self , a__ ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
        # as are Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and are handled
        # like all of the other languages.
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = False , a__ = None , **a__ ):
super().__init__(a__ , a__ , **a__ )
_lowerCAmelCase : List[Any] = Queue()
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Union[str, Any] = timeout
def __A ( self , a__ , a__ = False ):
self.text_queue.put(a__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
return self
def __A ( self ):
_lowerCAmelCase : List[str] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
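# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). The iterator class
# above mirrors `transformers.TextIteratorStreamer`, which is consumed like
# this: generation runs in a background thread while the main thread iterates
# over the streamer. The "gpt2" checkpoint is only an illustrative choice.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from threading import Thread

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok(["An increasing sequence: one,"], return_tensors="pt")

    streamer = TextIteratorStreamer(tok, skip_prompt=True)
    generation_thread = Thread(
        target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20)
    )
    generation_thread.start()
    for new_text in streamer:  # yields decoded text chunks as they are produced
        print(new_text, end="")
    generation_thread.join()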
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_a : Optional[Any] = re.compile(r'\s+')
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Dict:
return {"hash": hashlib.mda(re.sub(_lowerCamelCase ,"""""" ,example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[int]:
_lowerCAmelCase : Optional[int] = [len(_lowerCamelCase ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(_lowerCamelCase ), "line_max": max(_lowerCamelCase )}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Any:
_lowerCAmelCase : Tuple = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Any ) -> str:
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Any=5 ) -> int:
_lowerCAmelCase : str = ["""auto-generated""", """autogenerated""", """automatically generated"""]
_lowerCAmelCase : List[str] = example["""content"""].splitlines()
for _, line in zip(range(_lowerCamelCase ) ,_lowerCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Any=5 ,_lowerCamelCase : Any=0.05 ) -> Dict:
_lowerCAmelCase : Tuple = ["""unit tests""", """test file""", """configuration file"""]
_lowerCAmelCase : Any = example["""content"""].splitlines()
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = 0
# first test
for _, line in zip(range(_lowerCamelCase ) ,_lowerCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCAmelCase : Tuple = example["""content"""].count("""\n""" )
_lowerCAmelCase : Optional[Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> int:
_lowerCAmelCase : Any = ["""def """, """class """, """for """, """while """]
_lowerCAmelCase : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Union[str, Any]=4 ) -> str:
_lowerCAmelCase : Any = example["""content"""].splitlines()
_lowerCAmelCase : Tuple = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
_lowerCAmelCase : Optional[int] = tokenizer(example["""content"""] ,truncation=_lowerCamelCase )["""input_ids"""]
_lowerCAmelCase : int = len(example["""content"""] ) / len(_lowerCamelCase )
return {"ratio": ratio}
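# Low values of this characters-per-token ratio (i.e. many tokens for few characters)
# usually point to unusual content such as minified or encoded text, which is why
# `min_token_ratio` is used as a filtering threshold further below.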
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Dict:
_lowerCAmelCase : List[Any] = {}
results.update(get_hash(_lowerCamelCase ) )
results.update(line_stats(_lowerCamelCase ) )
results.update(alpha_stats(_lowerCamelCase ) )
results.update(char_token_ratio(_lowerCamelCase ) )
results.update(is_autogenerated(_lowerCamelCase ) )
results.update(is_config_or_test(_lowerCamelCase ) )
results.update(has_no_keywords(_lowerCamelCase ) )
results.update(has_few_assignments(_lowerCamelCase ) )
return results
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ,_lowerCamelCase : int ) -> Tuple:
if not check_uniques(_lowerCamelCase ,_lowerCamelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> Union[str, Any]:
with open(_lowerCamelCase ,"""rb""" ) as f_in:
with gzip.open(str(_lowerCamelCase ) + """.gz""" ,"""wb""" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCamelCase ,_lowerCamelCase )
os.unlink(_lowerCamelCase )
# Settings
_a : Dict = HfArgumentParser(PreprocessingArguments)
_a : Dict = parser.parse_args()
if args.num_workers is None:
_a : str = multiprocessing.cpu_count()
_a : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_a : Dict = time.time()
_a : int = load_dataset(args.dataset_name, split='train')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
_a : List[Any] = time.time()
_a : Any = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
_a : Optional[int] = set(ds.unique('hash'))
_a : Dict = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
_a : Dict = time.time()
_a : Dict = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_a : Optional[int] = time.time()
_a : Union[str, Any] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
_a : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
_a : Dict = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
_a : Any = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_a : Union[str, Any] = str(data_dir / F"""file-{file_number+1:012}.json""")
_a : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
        # It's very difficult to mix/test pretokenization with byte-level
        # and get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string).
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_a : str = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_a : Any = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_a : Optional[int] = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : bool ,_lowerCamelCase : Optional[Dict[int, int]] = None ,_lowerCamelCase : bool = False ,) -> List[str]:
if label_map is not None:
for old_id, new_id in label_map.items():
_lowerCAmelCase : Dict = new_id
# turn into Numpy arrays
_lowerCAmelCase : Dict = np.array(_lowerCamelCase )
_lowerCAmelCase : str = np.array(_lowerCamelCase )
if reduce_labels:
_lowerCAmelCase : Optional[int] = 255
_lowerCAmelCase : Union[str, Any] = label - 1
_lowerCAmelCase : Optional[Any] = 255
_lowerCAmelCase : Optional[Any] = label != ignore_index
_lowerCAmelCase : List[str] = np.not_equal(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = pred_label[mask]
_lowerCAmelCase : Tuple = np.array(_lowerCamelCase )[mask]
_lowerCAmelCase : Tuple = pred_label[pred_label == label]
_lowerCAmelCase : int = np.histogram(_lowerCamelCase ,bins=_lowerCamelCase ,range=(0, num_labels - 1) )[0]
_lowerCAmelCase : Dict = np.histogram(_lowerCamelCase ,bins=_lowerCamelCase ,range=(0, num_labels - 1) )[0]
_lowerCAmelCase : List[str] = np.histogram(_lowerCamelCase ,bins=_lowerCamelCase ,range=(0, num_labels - 1) )[0]
_lowerCAmelCase : int = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
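# Per-class areas are accumulated with histograms over the label ids; the class-wise IoU
# later reduces to area_intersect / (area_pred_label + area_label - area_intersect).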
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : str ,_lowerCamelCase : Any ,_lowerCamelCase : bool ,_lowerCamelCase : Optional[Dict[int, int]] = None ,_lowerCamelCase : bool = False ,) -> Optional[int]:
_lowerCAmelCase : Tuple = np.zeros((num_labels,) ,dtype=np.floataa )
_lowerCAmelCase : Any = np.zeros((num_labels,) ,dtype=np.floataa )
_lowerCAmelCase : List[Any] = np.zeros((num_labels,) ,dtype=np.floataa )
_lowerCAmelCase : Optional[Any] = np.zeros((num_labels,) ,dtype=np.floataa )
for result, gt_seg_map in zip(_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : Dict = intersect_and_union(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : bool ,_lowerCamelCase : Optional[int] = None ,_lowerCamelCase : Optional[Dict[int, int]] = None ,_lowerCamelCase : bool = False ,) -> Dict:
_lowerCAmelCase : Any = total_intersect_and_union(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# compute metrics
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Dict = total_area_intersect.sum() / total_area_label.sum()
_lowerCAmelCase : str = total_area_intersect / total_area_union
_lowerCAmelCase : Dict = total_area_intersect / total_area_label
_lowerCAmelCase : Union[str, Any] = np.nanmean(_lowerCamelCase )
_lowerCAmelCase : Dict = np.nanmean(_lowerCamelCase )
_lowerCAmelCase : List[str] = all_acc
_lowerCAmelCase : Any = iou
_lowerCAmelCase : Optional[int] = acc
if nan_to_num is not None:
_lowerCAmelCase : Optional[int] = {metric: np.nan_to_num(_lowerCamelCase ,nan=_lowerCamelCase ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
def __A ( self , a__ , a__ , a__ , a__ , a__ = None , a__ = None , a__ = False , ):
_lowerCAmelCase : str = mean_iou(
results=a__ , gt_seg_maps=a__ , num_labels=a__ , ignore_index=a__ , nan_to_num=a__ , label_map=a__ , reduce_labels=a__ , )
return iou_result
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Union[str, Any] ) -> List[Any]:
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) ,end="""\t""" )
else:
print("""INF""" ,end="""\t""" )
print()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Union[str, Any] ) -> str:
_lowerCAmelCase : List[str] = [[float("""inf""" ) for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
_lowerCAmelCase : Any = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(_lowerCamelCase ):
# looping through rows of graph array
for i in range(_lowerCamelCase ):
# looping through columns of graph array
for j in range(_lowerCamelCase ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
_lowerCAmelCase : List[str] = dist[i][k] + dist[k][j]
_print_dist(_lowerCamelCase ,_lowerCamelCase )
return dist, v
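# Minimal non-interactive sketch (assumed values, same shape as the interactive example
# below): with a single edge 0 -> 1 of weight 2.0 and an edge 1 -> 2 of weight 1.0,
#
#   graph = [[0.0, 2.0, float("inf")],
#            [float("inf"), 0.0, 1.0],
#            [float("inf"), float("inf"), 0.0]]
#   floyd_warshall(graph, 3)
#
# relaxes dist[0][2] to 3.0 through the intermediate vertex 1.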
if __name__ == "__main__":
_a : Optional[Any] = int(input('Enter number of vertices: '))
_a : Dict = int(input('Enter number of edges: '))
_a : Any = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
_a : int = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
_a : Optional[Any] = int(input('Enter source:'))
_a : List[str] = int(input('Enter destination:'))
_a : Tuple = float(input('Enter weight:'))
_a : Tuple = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
    # # Expected output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 708 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
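# Example: 6 has proper divisors 1, 2 and 3, and 1 + 2 + 3 == 6, so perfect(6) is True;
# 28 (= 1 + 2 + 4 + 7 + 14) is the next perfect number, while e.g. perfect(12) is False.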
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 0 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_a : Union[str, Any] = ''
_a : Optional[Any] = ''
_a : List[str] = ''
_a : Optional[Any] = 1 # (0 is vertical, 1 is horizontal)
def SCREAMING_SNAKE_CASE ( ) -> None:
_lowerCAmelCase : int = get_dataset(_lowerCamelCase ,_lowerCamelCase )
print("""Processing...""" )
_lowerCAmelCase : List[Any] = update_image_and_anno(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for index, image in enumerate(_lowerCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase : Tuple = random_chars(32 )
_lowerCAmelCase : Union[str, Any] = paths[index].split(os.sep )[-1].rsplit(""".""" ,1 )[0]
_lowerCAmelCase : List[str] = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(f"/{file_root}.jpg" ,_lowerCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"Success {index+1}/{len(_lowerCamelCase )} with {file_name}" )
_lowerCAmelCase : Tuple = []
for anno in new_annos[index]:
_lowerCAmelCase : str = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(_lowerCamelCase )
with open(f"/{file_root}.txt" ,"""w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> tuple[list, list]:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Optional[Any] = []
for label_file in glob.glob(os.path.join(_lowerCamelCase ,"""*.txt""" ) ):
_lowerCAmelCase : Optional[int] = label_file.split(os.sep )[-1].rsplit(""".""" ,1 )[0]
with open(_lowerCamelCase ) as in_file:
_lowerCAmelCase : Union[str, Any] = in_file.readlines()
_lowerCAmelCase : str = os.path.join(_lowerCamelCase ,f"{label_name}.jpg" )
_lowerCAmelCase : Dict = []
for obj_list in obj_lists:
_lowerCAmelCase : Dict = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_lowerCamelCase )
labels.append(_lowerCamelCase )
return img_paths, labels
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : list ,_lowerCamelCase : int = 1 ) -> tuple[list, list, list]:
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : str = []
_lowerCAmelCase : Tuple = []
for idx in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Dict = img_list[idx]
path_list.append(_lowerCamelCase )
_lowerCAmelCase : Any = anno_list[idx]
_lowerCAmelCase : Any = cva.imread(_lowerCamelCase )
if flip_type == 1:
_lowerCAmelCase : List[Any] = cva.flip(_lowerCamelCase ,_lowerCamelCase )
for bbox in img_annos:
_lowerCAmelCase : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase : str = cva.flip(_lowerCamelCase ,_lowerCamelCase )
for bbox in img_annos:
_lowerCAmelCase : Optional[Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_lowerCamelCase )
new_imgs_list.append(_lowerCamelCase )
return new_imgs_list, new_annos_lists, path_list
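# Annotations follow the YOLO-style convention (class, x_center, y_center, width, height),
# all normalized to [0, 1], so a horizontal flip only needs x_center -> 1 - x_center and a
# vertical flip only needs y_center -> 1 - y_center; widths and heights stay unchanged.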
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
_UpperCamelCase : int
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def SCREAMING_SNAKE_CASE ( ) -> Node | None:
_lowerCAmelCase : Tuple = Node(1 )
_lowerCAmelCase : int = Node(2 )
_lowerCAmelCase : int = Node(3 )
_lowerCAmelCase : Any = Node(4 )
_lowerCAmelCase : Dict = Node(5 )
return tree
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
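# For the usual five-node example tree (1 with children 2 and 3, and 2 with children
# 4 and 5), zigzag level order yields [[1], [3, 2], [4, 5]].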
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Any:
_lowerCAmelCase : Tuple = 384
_lowerCAmelCase : Optional[Any] = 7
if "tiny" in model_name:
_lowerCAmelCase : List[Any] = 96
_lowerCAmelCase : List[str] = (2, 2, 6, 2)
_lowerCAmelCase : int = (3, 6, 12, 24)
elif "small" in model_name:
_lowerCAmelCase : Any = 96
_lowerCAmelCase : Dict = (2, 2, 18, 2)
_lowerCAmelCase : Any = (3, 6, 12, 24)
elif "base" in model_name:
_lowerCAmelCase : Optional[int] = 128
_lowerCAmelCase : int = (2, 2, 18, 2)
_lowerCAmelCase : Union[str, Any] = (4, 8, 16, 32)
_lowerCAmelCase : str = 12
_lowerCAmelCase : List[Any] = 512
elif "large" in model_name:
_lowerCAmelCase : List[str] = 192
_lowerCAmelCase : Tuple = (2, 2, 18, 2)
_lowerCAmelCase : Union[str, Any] = (6, 12, 24, 48)
_lowerCAmelCase : Union[str, Any] = 12
_lowerCAmelCase : Optional[int] = 768
# set label information
_lowerCAmelCase : Dict = 150
_lowerCAmelCase : List[Any] = """huggingface/label-files"""
_lowerCAmelCase : Optional[Any] = """ade20k-id2label.json"""
_lowerCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase ,_lowerCamelCase ,repo_type="""dataset""" ) ,"""r""" ) )
_lowerCAmelCase : Tuple = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : int = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : List[str] = SwinConfig(
embed_dim=_lowerCamelCase ,depths=_lowerCamelCase ,num_heads=_lowerCamelCase ,window_size=_lowerCamelCase ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
_lowerCAmelCase : Dict = UperNetConfig(
backbone_config=_lowerCamelCase ,auxiliary_in_channels=_lowerCamelCase ,num_labels=_lowerCamelCase ,idalabel=_lowerCamelCase ,labelaid=_lowerCamelCase ,)
return config
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Dict ,_lowerCamelCase : Optional[int] ) -> Optional[Any]:
_lowerCAmelCase : Optional[int] = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Dict = val
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ,_lowerCamelCase : int ) -> str:
_lowerCAmelCase : Tuple = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCAmelCase : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCAmelCase : str = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
_lowerCAmelCase : List[Any] = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Optional[Any] = in_proj_weight[:dim, :]
_lowerCAmelCase : Union[str, Any] = in_proj_bias[: dim]
_lowerCAmelCase : List[str] = in_proj_weight[
dim : dim * 2, :
]
_lowerCAmelCase : Tuple = in_proj_bias[
dim : dim * 2
]
_lowerCAmelCase : Tuple = in_proj_weight[
-dim :, :
]
_lowerCAmelCase : Any = in_proj_bias[-dim :]
# fmt: on
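# The original checkpoints store one fused (3 * dim, dim) qkv projection per block; the
# slices above split it into the separate query / key / value weights expected by the
# Hugging Face Swin implementation.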
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Optional[Any]:
_lowerCAmelCase : Union[str, Any] = x.shape
_lowerCAmelCase : Any = x.reshape(_lowerCamelCase ,4 ,in_channel // 4 )
_lowerCAmelCase : Optional[Any] = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_lowerCamelCase ,_lowerCamelCase )
return x
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Dict:
_lowerCAmelCase : str = x.shape
_lowerCAmelCase : List[str] = x.reshape(_lowerCamelCase ,in_channel // 4 ,4 )
_lowerCAmelCase : int = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_lowerCamelCase ,_lowerCamelCase )
return x
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : Tuple = x.shape[0]
_lowerCAmelCase : Optional[Any] = x.reshape(4 ,in_channel // 4 )
_lowerCAmelCase : Optional[int] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_lowerCamelCase )
return x
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> List[str]:
_lowerCAmelCase : Tuple = x.shape[0]
_lowerCAmelCase : Any = x.reshape(in_channel // 4 ,4 )
_lowerCAmelCase : Any = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_lowerCamelCase )
return x
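# Illustrative sketch (added for clarity, not part of the original converter): the four
# helpers above shuffle patch-merging ("unfold") weights between the channel-group order
# used by the original checkpoint and the order expected here. The core trick is the
# [0, 2, 1, 3] permutation, which swaps the two middle groups of four interleaved channel
# blocks. For a vector of length 8, the reshape(4, n // 4) variant maps
#     [0, 1, 2, 3, 4, 5, 6, 7] -> [0, 4, 2, 6, 1, 5, 3, 7]
# and feeding that result through the matching reverse helper restores the original order,
# so each forward/reverse pair of helpers are mutual inverses.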
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[Any] ) -> List[Any]:
_lowerCAmelCase : Optional[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
_lowerCAmelCase : Union[str, Any] = model_name_to_url[model_name]
_lowerCAmelCase : Optional[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase ,map_location="""cpu""" ,file_name=_lowerCamelCase )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_lowerCamelCase ,param.shape )
_lowerCAmelCase : Optional[Any] = get_upernet_config(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = UperNetForSemanticSegmentation(_lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase : Optional[int] = state_dict.pop(_lowerCamelCase )
if "bn" in key:
_lowerCAmelCase : List[Any] = key.replace("""bn""" ,"""batch_norm""" )
_lowerCAmelCase : str = val
# rename keys
_lowerCAmelCase : int = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
read_in_q_k_v(_lowerCamelCase ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
_lowerCAmelCase : Any = reverse_correct_unfold_reduction_order(_lowerCamelCase )
if "norm" in key:
_lowerCAmelCase : Any = reverse_correct_unfold_norm_order(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
# verify on image
_lowerCAmelCase : Optional[Any] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_lowerCAmelCase : List[Any] = Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw ).convert("""RGB""" )
_lowerCAmelCase : Tuple = SegformerImageProcessor()
_lowerCAmelCase : Optional[Any] = processor(_lowerCamelCase ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_lowerCAmelCase : Dict = model(_lowerCamelCase )
_lowerCAmelCase : List[str] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
_lowerCAmelCase : Tuple = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
_lowerCAmelCase : Tuple = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_lowerCamelCase ,atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(f"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(f"openmmlab/{model_name}" )
processor.push_to_hub(f"openmmlab/{model_name}" )
if __name__ == "__main__":
_a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_a : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
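# Example invocation (added for reference; the script filename is hypothetical, the flags
# are the ones defined by the parser above):
#
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub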
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : int = StableDiffusionInpaintPipeline
_UpperCamelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCamelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCamelCase : str = frozenset([] )
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
_lowerCAmelCase : Optional[int] = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
_lowerCAmelCase : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_lowerCAmelCase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
_lowerCAmelCase : List[str] = CLIPTextModel(a__ )
_lowerCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self , a__ , a__=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
_lowerCAmelCase : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : str = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((64, 64) )
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Tuple = torch.manual_seed(a__ )
else:
_lowerCAmelCase : List[Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Dict = self.get_dummy_components()
_lowerCAmelCase : Any = StableDiffusionInpaintPipeline(**a__ )
_lowerCAmelCase : Any = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = self.get_dummy_inputs(a__ )
_lowerCAmelCase : Optional[int] = sd_pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_lowerCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_lowerCAmelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
_lowerCAmelCase : str = """stabilityai/stable-diffusion-2-inpainting"""
_lowerCAmelCase : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_lowerCAmelCase : Tuple = """Face of a yellow cat, high resolution, sitting on a park bench"""
_lowerCAmelCase : Tuple = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def __A ( self ):
_lowerCAmelCase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_lowerCAmelCase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_lowerCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
_lowerCAmelCase : Optional[Any] = """stabilityai/stable-diffusion-2-inpainting"""
_lowerCAmelCase : str = StableDiffusionInpaintPipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , safety_checker=a__ , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
_lowerCAmelCase : int = """Face of a yellow cat, high resolution, sitting on a park bench"""
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : Any = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def __A ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
_lowerCAmelCase : Optional[Any] = """stabilityai/stable-diffusion-2-inpainting"""
_lowerCAmelCase : Optional[int] = PNDMScheduler.from_pretrained(a__ , subfolder="""scheduler""" )
_lowerCAmelCase : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(
a__ , safety_checker=a__ , scheduler=a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase : Tuple = """Face of a yellow cat, high resolution, sitting on a park bench"""
_lowerCAmelCase : Optional[Any] = torch.manual_seed(0 )
_lowerCAmelCase : str = pipe(
prompt=a__ , image=a__ , mask_image=a__ , generator=a__ , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
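# Minimal end-user sketch (added for reference), mirroring the slow tests above; it assumes
# a CUDA device, the public checkpoint used in the tests, and init_image / mask_image
# loaded with load_image as shown earlier:
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting")
#   pipe = pipe.to("cuda")
#   result = pipe(prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#                 image=init_image, mask_image=mask_image).images[0]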
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
 ##Variables for cluster assignments of individual vectors (initialized

##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
 # 'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
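# Usage sketch (added for reference). This code relies on TF 1.x constructs such as
# tf.Session and placeholders, so it will not run on TensorFlow 2 without the
# tf.compat.v1 shims. Assuming the clustering function above keeps its upstream name
# TFKMeansCluster, a call looks like:
#
#   vectors = [array([1.0, 1.0]), array([1.5, 2.0]), array([8.0, 8.0]), array([8.5, 9.0])]
#   centroids, assignments = TFKMeansCluster(vectors, 2)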
| 663 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["image_processor", "tokenizer"]
_UpperCamelCase : Any = "AutoImageProcessor"
_UpperCamelCase : int = "AutoTokenizer"
def __init__( self , a__ , a__ ):
super().__init__(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = self.image_processor
def __call__( self , a__=None , a__=None , a__=None , **a__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase : Dict = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
_lowerCAmelCase : Tuple = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
_lowerCAmelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __A ( self ):
return ["input_ids", "attention_mask", "pixel_values"]
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
_a : Union[str, Any] = tuple[int, int]
class __A :
def __init__( self , a__ , a__ ):
_lowerCAmelCase : set[int] = vertices
_lowerCAmelCase : dict[EdgeT, int] = {
(min(a__ ), max(a__ )): weight for edge, weight in edges.items()
}
def __A ( self , a__ , a__ ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
_lowerCAmelCase : Optional[Any] = weight
def __A ( self ):
_lowerCAmelCase : Graph = Graph({min(self.vertices )} , {} )
_lowerCAmelCase : EdgeT
_lowerCAmelCase : int
_lowerCAmelCase : EdgeT
_lowerCAmelCase : int
while len(subgraph.vertices ) < len(self.vertices ):
_lowerCAmelCase : Optional[int] = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_lowerCAmelCase : str = edge
_lowerCAmelCase : Any = weight
subgraph.add_edge(a__ , a__ )
return subgraph
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str = "p107_network.txt" ) -> int:
_lowerCAmelCase : str = os.path.abspath(os.path.dirname(_lowerCamelCase ) )
_lowerCAmelCase : str = os.path.join(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : dict[EdgeT, int] = {}
_lowerCAmelCase : list[str]
_lowerCAmelCase : int
_lowerCAmelCase : int
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : Optional[Any] = f.read().strip().split("""\n""" )
_lowerCAmelCase : int = [line.split(""",""" ) for line in data]
for edgea in range(1 ,len(_lowerCamelCase ) ):
for edgea in range(_lowerCamelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
_lowerCAmelCase : Union[str, Any] = int(adjaceny_matrix[edgea][edgea] )
_lowerCAmelCase : Graph = Graph(set(range(len(_lowerCamelCase ) ) ) ,_lowerCamelCase )
_lowerCAmelCase : Graph = graph.prims_algorithm()
_lowerCAmelCase : int = sum(graph.edges.values() )
_lowerCAmelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
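# Worked example (added for clarity): for a triangle graph with edges
# {(0, 1): 1, (1, 2): 2, (0, 2): 3}, Prim's algorithm keeps the two cheapest edges
# (total weight 3), so the maximum saving is (1 + 2 + 3) - 3 = 3. solution() computes
# the same difference, total edge weight minus minimum-spanning-tree weight, for the
# Project Euler network file.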
if __name__ == "__main__":
print(F"""{solution() = }""")
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
def get_matched_characters(_lowerCamelCase : str ,_lowerCamelCase : str ) -> str:
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[Any] = min(len(_stra ) ,len(_stra ) ) // 2
for i, l in enumerate(_stra ):
_lowerCAmelCase : Tuple = int(max(0 ,i - limit ) )
_lowerCAmelCase : Union[str, Any] = int(min(i + limit + 1 ,len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_lowerCamelCase )
_lowerCAmelCase : List[str] = f"{_stra[0:_stra.index(_lowerCamelCase )]} {_stra[_stra.index(_lowerCamelCase ) + 1:]}"
return "".join(_lowerCamelCase )
# matching characters
_lowerCAmelCase : Any = get_matched_characters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_matched_characters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase )
# transposition
_lowerCAmelCase : Union[str, Any] = (
len([(ca, ca) for ca, ca in zip(_lowerCamelCase ,_lowerCamelCase ) if ca != ca] ) // 2
)
if not match_count:
_lowerCAmelCase : int = 0.0
else:
_lowerCAmelCase : Optional[Any] = (
1
/ 3
* (
match_count / len(_lowerCamelCase )
+ match_count / len(_lowerCamelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
_lowerCAmelCase : Dict = 0
for ca, ca in zip(stra[:4] ,stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
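# Worked example (added for clarity): for the classic pair ("martha", "marhta") the two
# strings differ by a single transposition and share the 3-character prefix "mar", which
# gives a Jaro similarity of about 0.9444 and a Jaro-Winkler similarity of about 0.9611.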
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : Optional[Any] = {'vocab_file': 'spiece.model'}
_a : Union[str, Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
_a : Union[str, Any] = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = VOCAB_FILES_NAMES
_UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : str = ["input_ids", "attention_mask"]
_UpperCamelCase : List[int] = []
def __init__( self , a__ , a__="<unk>" , a__="<s>" , a__="</s>" , a__="<pad>" , a__="[SEP]" , a__="[MASK]" , a__="[CLS]" , a__ = None , **a__ , ):
_lowerCAmelCase : Union[str, Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else bos_token
_lowerCAmelCase : Tuple = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else eos_token
_lowerCAmelCase : Any = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else unk_token
_lowerCAmelCase : int = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else pad_token
_lowerCAmelCase : int = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else cls_token
_lowerCAmelCase : Tuple = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else sep_token
 # The mask token behaves like a normal word, i.e. it includes the space before it
_lowerCAmelCase : Union[str, Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
_lowerCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , pad_token=a__ , sep_token=a__ , mask_token=a__ , cls_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_lowerCAmelCase : Optional[Any] = vocab_file
_lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@property
def __A ( self ):
return self.sp_model.get_piece_size()
def __A ( self ):
_lowerCAmelCase : str = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , a__ ):
_lowerCAmelCase : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self , a__ ):
return self.sp_model.encode(a__ , out_type=a__ )
def __A ( self , a__ ):
return self.sp_model.piece_to_id(a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Any = self.sp_model.IdToPiece(a__ )
return token
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : int = []
else:
current_sub_tokens.append(a__ )
_lowerCAmelCase : List[str] = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __A ( self , a__ , a__ = False , a__ = None , a__ = True , **a__ , ):
_lowerCAmelCase : Any = kwargs.pop("""use_source_tokenizer""" , a__ )
_lowerCAmelCase : Tuple = self.convert_ids_to_tokens(a__ , skip_special_tokens=a__ )
 # To avoid mixing byte-level and unicode for byte-level BPE,
 # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[int] = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a__ ) )
_lowerCAmelCase : str = []
sub_texts.append(a__ )
else:
current_sub_text.append(a__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_lowerCAmelCase : Tuple = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(a__ ) )
else:
_lowerCAmelCase : Tuple = """""".join(a__ )
_lowerCAmelCase : Optional[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase : Any = self.clean_up_tokenization(a__ )
return clean_text
else:
return text
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Any = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , """wb""" ) as fi:
_lowerCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Dict = [self.cls_token_id]
_lowerCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
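# Usage sketch (added for reference; the class above corresponds to the upstream
# BigBirdTokenizer, shown here under a rewritten name):
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   encoded = tokenizer("Paris is the capital of France.", return_tensors="pt")
#   text = tokenizer.decode(encoded["input_ids"][0])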
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_a : Optional[Any] = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_a : Optional[Any] = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_a : Any = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_a : Optional[Any] = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def __A ( self , a__ ):
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def __A ( self , a__ , a__ , a__=0.9 , a__=3 , a__=0.5 ):
if NLTK_VERSION >= version.Version("""3.6.5""" ):
_lowerCAmelCase : List[str] = [
meteor_score.single_meteor_score(
word_tokenize(a__ ) , word_tokenize(a__ ) , alpha=a__ , beta=a__ , gamma=a__ )
for ref, pred in zip(a__ , a__ )
]
else:
_lowerCAmelCase : Tuple = [
meteor_score.single_meteor_score(a__ , a__ , alpha=a__ , beta=a__ , gamma=a__ )
for ref, pred in zip(a__ , a__ )
]
return {"meteor": np.mean(a__ )}
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __A :
def __init__( self , a__ , a__=13 , a__=32 , a__=2 , a__=3 , a__=16 , a__=[1, 2, 1] , a__=[2, 2, 4] , a__=2 , a__=2.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=True , a__=0.0_2 , a__=1e-5 , a__=True , a__=None , a__=True , a__=10 , a__=8 , a__=["stage1", "stage2", "stage3"] , a__=[1, 2, 3] , ):
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Any = image_size
_lowerCAmelCase : Optional[int] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[str] = embed_dim
_lowerCAmelCase : str = depths
_lowerCAmelCase : Tuple = num_heads
_lowerCAmelCase : Dict = window_size
_lowerCAmelCase : Optional[Any] = mlp_ratio
_lowerCAmelCase : Tuple = qkv_bias
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Any = drop_path_rate
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : List[Any] = use_absolute_embeddings
_lowerCAmelCase : Tuple = patch_norm
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : str = scope
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : List[str] = type_sequence_label_size
_lowerCAmelCase : List[Any] = encoder_stride
_lowerCAmelCase : Any = out_features
_lowerCAmelCase : int = out_indices
def __A ( self ):
_lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : str = MaskFormerSwinModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(a__ )
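        # tokens per image: (image_size / patch_size) ** 2 patches, reduced by a factor of 4 in each Swin stage after the first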
_lowerCAmelCase : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = MaskFormerSwinBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : int = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(a__ ):
_lowerCAmelCase : int = ["""stem"""]
_lowerCAmelCase : Union[str, Any] = MaskFormerSwinBackbone(config=a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase : Any = config_and_inputs
_lowerCAmelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase : List[Any] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = MaskFormerSwinModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=a__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def __A ( self ):
pass
def __A ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ):
return
def __A ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : str = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(a__ )
_lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def __A ( self ):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def __A ( self ):
pass
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Union[str, Any] = outputs.hidden_states
_lowerCAmelCase : Any = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# Swin has a different seq_length
_lowerCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : int = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Tuple = 3
_lowerCAmelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
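        # pad the spatial dimensions so they are divisible by the patch size before re-checking the hidden states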
_lowerCAmelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Optional[Any] = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def __A ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __A ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a__ ):
_lowerCAmelCase : List[str] = 0
return t
def check_equivalence(a__ , a__ , a__ , a__={} ):
with torch.no_grad():
_lowerCAmelCase : Dict = model(**a__ , return_dict=a__ , **a__ )
_lowerCAmelCase : Dict = model(**a__ , return_dict=a__ , **a__ ).to_tuple()
def recursive_check(a__ , a__ ):
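                # recursively compare tuple-style and dict-style outputs element by element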
if isinstance(a__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a__ , a__ ):
recursive_check(a__ , a__ )
elif isinstance(a__ , a__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a__ , a__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a__ ) , set_nan_tensor_to_zero(a__ ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(a__ ).any()} and `inf`: {torch.isinf(a__ )}. Dict has"
F" `nan`: {torch.isnan(a__ ).any()} and `inf`: {torch.isinf(a__ )}."
) , )
recursive_check(a__ , a__ )
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Any = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : Optional[Any] = self._prepare_for_class(a__ , a__ )
check_equivalence(a__ , a__ , a__ )
_lowerCAmelCase : List[Any] = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_lowerCAmelCase : Tuple = self._prepare_for_class(a__ , a__ , return_labels=a__ )
check_equivalence(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : Dict = self._prepare_for_class(a__ , a__ )
check_equivalence(a__ , a__ , a__ , {"""output_hidden_states""": True} )
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_lowerCAmelCase : Any = self._prepare_for_class(a__ , a__ , return_labels=a__ )
check_equivalence(a__ , a__ , a__ , {"""output_hidden_states""": True} )
@require_torch
class __A ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_UpperCamelCase : Optional[Any] = MaskFormerSwinConfig
def __A ( self ):
_lowerCAmelCase : Tuple = MaskFormerSwinModelTester(self )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = backbone_class(a__ )
backbone.to(a__ )
backbone.eval()
_lowerCAmelCase : Optional[Any] = backbone(**a__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_lowerCAmelCase : str = backbone(**a__ , output_hidden_states=a__ )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_lowerCAmelCase : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_lowerCAmelCase : Any = backbone(**a__ , output_attentions=a__ )
self.assertIsNotNone(outputs.attentions )
| 718 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
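    # query the GitHub Actions API for the repository's self-hosted runners and collect the offline ones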
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
    # save the result so we can report it on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["image_processor", "tokenizer"]
_UpperCamelCase : Dict = "ChineseCLIPImageProcessor"
_UpperCamelCase : List[str] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self , a__=None , a__=None , **a__ ):
_lowerCAmelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , a__ , )
_lowerCAmelCase : List[str] = kwargs.pop("""feature_extractor""" )
_lowerCAmelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a__ , a__ )
_lowerCAmelCase : Any = self.image_processor
def __call__( self , a__=None , a__=None , a__=None , **a__ ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase : List[str] = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
_lowerCAmelCase : str = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
_lowerCAmelCase : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def __A ( self ):
_lowerCAmelCase : Any = self.tokenizer.model_input_names
_lowerCAmelCase : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , a__ , )
return self.image_processor_class
| 719 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : List[str] = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : List[str] = [i - 1 for i in range(limit + 1 )]
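    # sieve: phi[i] starts at i - 1; if phi[i] is still i - 1 when we reach it, i is prime, so reduce phi for all multiples of i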
for i in range(2 ,limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i ,limit + 1 ,_lowerCamelCase ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 663 | 0 |
"""simple docstring"""
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A :
def __init__( self ):
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
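        # scan the queues from highest priority (index 0) to lowest and pop from the first non-empty one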
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
_a : Any = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
_a : Any = ['a', 'b', 'c', 'd', 'e']
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ) -> int:
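    # depth-first search: a vertex is appended to the sort only after all of its neighbors have been visited (post-order)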
_lowerCAmelCase : List[Any] = start
# add current to visited
visited.append(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_lowerCAmelCase : str = topological_sort(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# if all neighbors visited add current to sort
sort.append(_lowerCamelCase )
# if all vertices haven't been visited select a new one to visit
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
for vertice in vertices:
if vertice not in visited:
_lowerCAmelCase : List[Any] = topological_sort(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# return sort
return sort
if __name__ == "__main__":
_a : Tuple = topological_sort('a', [], [])
print(sort)
| 700 |
"""simple docstring"""
from PIL import Image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ,_lowerCamelCase : int ) -> Image:
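    # standard contrast correction factor: F = 259 * (level + 255) / (255 * (259 - level))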
_lowerCAmelCase : Any = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(_lowerCamelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
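    # a number is perfect when it equals the sum of its proper divisors, e.g. 6 = 1 + 2 + 3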
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 701 |
"""simple docstring"""
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A ( SCREAMING_SNAKE_CASE_ ):
pass
class __A :
def __init__( self ):
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = "EncodecFeatureExtractor"
_UpperCamelCase : Optional[int] = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , a__ , a__ ):
super().__init__(a__ , a__ )
_lowerCAmelCase : Optional[Any] = self.feature_extractor
_lowerCAmelCase : Dict = False
def __A ( self , a__=None , a__=None , a__=True ):
return self.tokenizer.get_decoder_prompt_ids(task=a__ , language=a__ , no_timestamps=a__ )
def __call__( self , *a__ , **a__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a__ , **a__ )
_lowerCAmelCase : Any = kwargs.pop("""audio""" , a__ )
_lowerCAmelCase : Dict = kwargs.pop("""sampling_rate""" , a__ )
_lowerCAmelCase : List[Any] = kwargs.pop("""text""" , a__ )
if len(a__ ) > 0:
_lowerCAmelCase : List[str] = args[0]
_lowerCAmelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
_lowerCAmelCase : Union[str, Any] = self.tokenizer(a__ , **a__ )
if audio is not None:
_lowerCAmelCase : List[str] = self.feature_extractor(a__ , *a__ , sampling_rate=a__ , **a__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_lowerCAmelCase : Optional[int] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
_lowerCAmelCase : Union[str, Any] = audio_inputs["""padding_mask"""]
return inputs
def __A ( self , *a__ , **a__ ):
_lowerCAmelCase : Optional[int] = kwargs.pop("""audio""" , a__ )
_lowerCAmelCase : Dict = kwargs.pop("""padding_mask""" , a__ )
if len(a__ ) > 0:
_lowerCAmelCase : Optional[int] = args[0]
_lowerCAmelCase : str = args[1:]
if audio_values is not None:
return self._decode_audio(a__ , padding_mask=a__ )
else:
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = to_numpy(a__ )
_lowerCAmelCase : List[str] = audio_values.shape
if padding_mask is None:
return list(a__ )
_lowerCAmelCase : Any = to_numpy(a__ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_lowerCAmelCase : List[str] = seq_len - padding_mask.shape[-1]
_lowerCAmelCase : Optional[int] = 1 - self.feature_extractor.padding_value
_lowerCAmelCase : List[str] = np.pad(a__ , ((0, 0), (0, difference)) , """constant""" , constant_values=a__ )
_lowerCAmelCase : List[str] = audio_values.tolist()
for i in range(a__ ):
_lowerCAmelCase : Union[str, Any] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_lowerCAmelCase : Optional[int] = sliced_audio.reshape(a__ , -1 )
return audio_values
| 702 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 0 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __A ( nn.Module ):
def __init__( self , a__ = 16 , a__ = 88 , a__ = None , a__ = 1 , a__ = 0.0 , a__ = 32 , a__ = None , a__ = False , a__ = None , a__ = None , a__ = "geglu" , a__ = None , ):
super().__init__()
_lowerCAmelCase : Optional[int] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=a__ , attention_head_dim=a__ , in_channels=a__ , num_layers=a__ , dropout=a__ , norm_num_groups=a__ , cross_attention_dim=a__ , attention_bias=a__ , sample_size=a__ , num_vector_embeds=a__ , activation_fn=a__ , num_embeds_ada_norm=a__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_lowerCAmelCase : List[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_lowerCAmelCase : List[str] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_lowerCAmelCase : str = [1, 0]
def __A ( self , a__ , a__ , a__=None , a__=None , a__=None , a__ = True , ):
_lowerCAmelCase : Any = hidden_states
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Dict = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_lowerCAmelCase : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_lowerCAmelCase : Union[str, Any] = self.transformer_index_for_condition[i]
_lowerCAmelCase : str = self.transformers[transformer_index](
a__ , encoder_hidden_states=a__ , timestep=a__ , cross_attention_kwargs=a__ , return_dict=a__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_lowerCAmelCase : Tuple = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_lowerCAmelCase : str = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=a__ )
| 703 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
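    # ceil-divide height/width by scale_factor**2, then multiply back by scale_factor to get latent-compatible dimensions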
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
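    # resize the PIL image, convert it to RGB, rescale pixel values to [-1, 1] and return a (1, C, H, W) tensor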
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
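        # offload the unet and movq weights to the CPU with accelerate's cpu_offload, moving them to the given GPU only while they are used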
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
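                # classifier-free guidance: split the batched prediction into unconditional and conditioned halves and blend them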
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "Salesforce/blip-image-captioning-base"
_UpperCamelCase : Any = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
_UpperCamelCase : Dict = "image_captioner"
_UpperCamelCase : Dict = AutoModelForVisionaSeq
_UpperCamelCase : str = ["image"]
_UpperCamelCase : List[str] = ["text"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""vision"""] )
super().__init__(*a__ , **a__ )
def __A ( self , a__ ):
return self.pre_processor(images=a__ , return_tensors="""pt""" )
def __A ( self , a__ ):
return self.model.generate(**a__ )
def __A ( self , a__ ):
return self.pre_processor.batch_decode(a__ , skip_special_tokens=a__ )[0].strip()
| 704 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
        # NOTE: Larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 705 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[str] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Dict ) -> List[Any]:
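    # right-to-left replace: split on the last occurrences of the old substring and re-join with the new one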
_lowerCAmelCase : Dict = s.rsplit(_lowerCamelCase ,_lowerCamelCase )
return new.join(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Dict:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Dict:
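    # remap checkpoint keys from the original DALL-E encoder layout to the HF FlavaImageCodebook layout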
_lowerCAmelCase : str = {}
_lowerCAmelCase : Optional[int] = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
_lowerCAmelCase : Optional[Any] = key.replace(f"{group_key}." ,f"{group_key}.group." )
if "res_path" in key:
_lowerCAmelCase : List[Any] = key.replace("""res_path.""" ,"""res_path.path.""" )
if key.endswith(""".w""" ):
_lowerCAmelCase : Optional[int] = rreplace(_lowerCamelCase ,""".w""" ,""".weight""" ,1 )
if key.endswith(""".b""" ):
_lowerCAmelCase : Dict = rreplace(_lowerCamelCase ,""".b""" ,""".bias""" ,1 )
_lowerCAmelCase : Tuple = value.float()
return upgrade
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : int ,_lowerCamelCase : Optional[Any]=None ,_lowerCamelCase : List[str]=True ) -> Optional[Any]:
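    # load the original DALL-E encoder checkpoint, remap its state dict, and check that the parameter sums still match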
from dall_e import Encoder
_lowerCAmelCase : Optional[Any] = Encoder()
if os.path.exists(_lowerCamelCase ):
_lowerCAmelCase : int = torch.load(_lowerCamelCase )
else:
_lowerCAmelCase : List[str] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase ,_lowerCamelCase ):
_lowerCAmelCase : Any = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
_lowerCAmelCase : Tuple = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = FlavaImageCodebookConfig()
_lowerCAmelCase : Optional[int] = FlavaImageCodebook(_lowerCamelCase ).eval()
_lowerCAmelCase : Any = encoder.state_dict()
_lowerCAmelCase : List[str] = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
_lowerCAmelCase : Any = hf_model.state_dict()
_lowerCAmelCase : Optional[Any] = count_parameters(_lowerCamelCase )
_lowerCAmelCase : int = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_a : Optional[Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 706 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
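# Note on the slow test above: truncate_before_pattern cuts the decoded completion at the
# first line matching any of the given patterns (a leading "#", the <|endoftext|> marker,
# a docstring opener, or a run of blank lines), which is how the generated snippet is
# trimmed back down to the expected two-branch if/else body.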
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_a : Tuple = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
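# The scaffold above is the standard `transformers` lazy-import pattern: only
# `_import_structure` (submodule -> exported names) is built eagerly, optional backends are
# probed with is_torch_available()/is_tf_available(), and _LazyModule defers the heavy model
# imports until an attribute is actually accessed (TYPE_CHECKING gets real imports so static
# type checkers still resolve the names).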
| 707 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase = WavaVecaPhonemeCTCTokenizer
_UpperCamelCase = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Tuple = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
_lowerCAmelCase : Any = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : int = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
def __A ( self , a__ , a__=False , a__=20 , a__=5 ):
_lowerCAmelCase : Tuple = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )) for i in range(len(a__ ) )]
_lowerCAmelCase : List[Any] = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
_lowerCAmelCase : Optional[Any] = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
_lowerCAmelCase : List[str] = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase : Any = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase : str = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
_lowerCAmelCase : int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
_lowerCAmelCase : Dict = """ """ + output_txt
_lowerCAmelCase : int = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
_lowerCAmelCase : List[str] = tokenizer("""m xxx ɪ""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
_lowerCAmelCase : Dict = tokenizer("""m aaa ɪ ccc""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
_lowerCAmelCase : Any = tokenizer("""maɪ c""" , do_phonemize=a__ ).input_ids
self.assertEqual(a__ , [3, 200] ) # mai should be <unk> (=3)
def __A ( self ):
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Any = """Hello how are you"""
_lowerCAmelCase : str = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(a__ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Union[str, Any] = """Hello how are you"""
_lowerCAmelCase : str = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a__ ).input_ids , tokenizer(a__ , do_phonemize=a__ ).input_ids )
def __A ( self ):
_lowerCAmelCase : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Dict = """Hello how are you"""
_lowerCAmelCase : Dict = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(tokenizer(a__ ).input_ids )
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Union[str, Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_lowerCAmelCase : Tuple = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : Any = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : str = """Hello how are you"""
_lowerCAmelCase : Optional[int] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(a__ , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def __A ( self ):
_lowerCAmelCase : Any = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : int = """Hello how are you"""
_lowerCAmelCase : List[Any] = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a__ ).input_ids , tokenizer(a__ , do_phonemize=a__ ).input_ids )
def __A ( self ):
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_lowerCAmelCase : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_lowerCAmelCase : List[str] = tokenizer.decode(sample_ids[0] )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_lowerCAmelCase : List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=a__ )
_lowerCAmelCase : List[str] = tokenizer.batch_decode(a__ , filter_word_delimiter_token=a__ )
self.assertEqual(a__ , batch_tokens[0] )
self.assertEqual(a__ , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : Tuple = """Hello how are you"""
_lowerCAmelCase : Dict = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : Optional[int] = tokenizer.decode(tokenizer(a__ ).input_ids , filter_word_delimiter_token=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_lowerCAmelCase : Optional[int] = """Hello how are you"""
_lowerCAmelCase : Any = tokenizer.phonemize(a__ , phonemizer_lang="""en-us""" )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(tokenizer(a__ ).input_ids , filter_word_delimiter_token=a__ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , a__ )
def __A ( self ):
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=a__ )
_lowerCAmelCase : Tuple = """Hello how are you"""
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ , phonemizer_lang="""en-us""" ).input_ids
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(a__ , a__ )
_lowerCAmelCase : Dict = tokenizer.decode(a__ )
_lowerCAmelCase : Dict = tokenizer.decode(a__ )
self.assertEqual(a__ , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(a__ , """ɛ l o h aʊ a ʁ j u""" )
def __A ( self ):
_lowerCAmelCase : Any = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_lowerCAmelCase : Union[str, Any] = """Hello how Are you"""
_lowerCAmelCase : Optional[Any] = """hello how are you"""
_lowerCAmelCase : List[str] = tokenizer(a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ ).input_ids
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_lowerCAmelCase : str = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
_lowerCAmelCase : Tuple = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def __A ( a__ , a__ ):
_lowerCAmelCase : List[str] = [d[key] for d in offsets]
return retrieved_list
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_lowerCAmelCase : Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_lowerCAmelCase : Tuple = tokenizer.decode(a__ , output_char_offsets=a__ , filter_word_delimiter_token=a__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(a__ , a__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(a__ , a__ ):
self.assertTrue(isinstance(a__ , a__ ) )
self.assertTrue(isinstance(outputs_list[0] , a__ ) )
# transform list to ModelOutput
_lowerCAmelCase : List[Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(a__ , a__ ):
if isinstance(a__ , a__ ):
[recursive_check(a__ , a__ ) for la, la in zip(a__ , a__ )]
self.assertEqual(a__ , a__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
_lowerCAmelCase : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_lowerCAmelCase : str = tokenizer.batch_decode(a__ , output_char_offsets=a__ )
_lowerCAmelCase : List[Any] = [tokenizer.decode(a__ , output_char_offsets=a__ ) for ids in sample_ids]
check_list_tuples_equal(a__ , a__ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def __A ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def __A ( self ):
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def __A ( self ):
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase : Dict = self.get_tokenizers(do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Dict = tokenizer.vocab_size
_lowerCAmelCase : List[str] = len(a__ )
self.assertNotEqual(a__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_lowerCAmelCase : Tuple = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_lowerCAmelCase : List[Any] = tokenizer.add_tokens(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.vocab_size
_lowerCAmelCase : Optional[Any] = len(a__ )
self.assertNotEqual(a__ , 0 )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , len(a__ ) )
self.assertEqual(a__ , all_size + len(a__ ) )
_lowerCAmelCase : str = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=a__ )
self.assertGreaterEqual(len(a__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_lowerCAmelCase : str = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_lowerCAmelCase : Dict = tokenizer.add_special_tokens(a__ )
_lowerCAmelCase : int = tokenizer.vocab_size
_lowerCAmelCase : Any = len(a__ )
self.assertNotEqual(a__ , 0 )
self.assertEqual(a__ , a__ )
self.assertEqual(a__ , len(a__ ) )
self.assertEqual(a__ , all_size_a + len(a__ ) )
_lowerCAmelCase : Any = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=a__ )
self.assertGreaterEqual(len(a__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def __A ( self ):
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def __A ( self ):
pass
def __A ( self ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
_lowerCAmelCase : List[str] = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : Tuple = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(output["""text"""] , a__ )
| 708 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> bool:
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a : int = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
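# Quick sanity check (not part of the original): the first perfect numbers are 6, 28, 496
# and 8128, so the divisor-sum test above should behave like
#   >>> [n for n in range(1, 10_000) if perfect(n)]   # using the name from the __main__ block
#   [6, 28, 496, 8128]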
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
_UpperCamelCase : int
_UpperCamelCase : Node | None = None
_UpperCamelCase : Node | None = None
def SCREAMING_SNAKE_CASE ( ) -> Node | None:
_lowerCAmelCase : Tuple = Node(1 )
_lowerCAmelCase : int = Node(2 )
_lowerCAmelCase : int = Node(3 )
_lowerCAmelCase : Any = Node(4 )
_lowerCAmelCase : Dict = Node(5 )
return tree
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
if root is None:
return output
_lowerCAmelCase : Union[str, Any] = deque([root] )
while process_queue:
_lowerCAmelCase : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> Sequence[Node | None]:
_lowerCAmelCase : list[Any] = []
def populate_output(_lowerCamelCase : Node | None ,_lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
populate_output(_lowerCamelCase ,_lowerCamelCase )
return output
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Node | None ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
_lowerCAmelCase : list[Sequence[Node | None]] = []
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Dict = height(_lowerCamelCase )
for h in range(1 ,height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase ,_lowerCamelCase ) )
_lowerCAmelCase : Optional[int] = 0
return output
def SCREAMING_SNAKE_CASE ( ) -> None: # Main function for testing.
_lowerCAmelCase : int = make_tree()
print(f"In-order Traversal: {inorder(_lowerCamelCase )}" )
print(f"Pre-order Traversal: {preorder(_lowerCamelCase )}" )
print(f"Post-order Traversal: {postorder(_lowerCamelCase )}" ,"""\n""" )
print(f"Height of Tree: {height(_lowerCamelCase )}" ,"""\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) ,"""\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 ,height(_lowerCamelCase ) + 1 ):
print(f"Level {level}:" ,get_nodes_from_left_to_right(_lowerCamelCase ,level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
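# Expected shape of the zigzag output for the sample tree, assuming make_tree() wires the
# nodes into the usual shape (2 and 3 as children of 1, and 4 and 5 as children of 2):
#   >>> zigzag(make_tree())
#   [[1], [3, 2], [4, 5]]
# i.e. left-to-right on the first level, right-to-left on the second, and so on.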
| 663 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_a : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
_a : Dict = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "retribert"
def __init__( self , a__=30522 , a__=768 , a__=8 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=True , a__=128 , a__=0 , **a__ , ):
super().__init__(pad_token_id=a__ , **a__ )
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : str = type_vocab_size
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : str = share_encoders
_lowerCAmelCase : int = projection_dim
| 710 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
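# What the test above guards: accelerator.prepare wraps the raw SGD optimizer in accelerate's
# AcceleratedOptimizer, and that wrapper must still survive a pickle round-trip (a common
# ad-hoc checkpointing path), hence the pickle.loads(pickle.dumps(...)) inside try/except.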
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
_a : Optional[int] = list[list[float | int]]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Matrix ,_lowerCamelCase : Matrix ) -> Matrix:
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : float
for row in range(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Any = matrix[row][col]
_lowerCAmelCase : Optional[Any] = vector[row][0]
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Optional[int] = 0
while row < size and col < size:
# pivoting
        _lowerCAmelCase : Dict = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_lowerCamelCase ,_lowerCamelCase ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_lowerCAmelCase : Any = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 ,_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = augmented[rowa][col] / augmented[row][col]
_lowerCAmelCase : str = 0
for cola in range(col + 1 ,size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 ,_lowerCamelCase ):
for row in range(_lowerCamelCase ):
_lowerCAmelCase : Any = augmented[row][col] / augmented[col][col]
for cola in range(_lowerCamelCase ,size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] ,10 )] for row in range(_lowerCamelCase )
]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> Callable[[int], int]:
_lowerCAmelCase : int = len(_lowerCamelCase )
_lowerCAmelCase : Matrix = [[0 for _ in range(_lowerCamelCase )] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix = [[0] for _ in range(_lowerCamelCase )]
_lowerCAmelCase : Matrix
_lowerCAmelCase : int
_lowerCAmelCase : int
_lowerCAmelCase : int
for x_val, y_val in enumerate(_lowerCamelCase ):
for col in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = (x_val + 1) ** (size - col - 1)
_lowerCAmelCase : Any = y_val
_lowerCAmelCase : List[str] = solve(_lowerCamelCase ,_lowerCamelCase )
def interpolated_func(_lowerCamelCase : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_lowerCamelCase ) )
return interpolated_func
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Callable[[int], int] = question_function ,_lowerCamelCase : int = 10 ) -> int:
_lowerCAmelCase : list[int] = [func(_lowerCamelCase ) for x_val in range(1 ,order + 1 )]
_lowerCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 ,order + 1 )
]
_lowerCAmelCase : int = 0
_lowerCAmelCase : Callable[[int], int]
_lowerCAmelCase : int
for poly in polynomials:
_lowerCAmelCase : Tuple = 1
while func(_lowerCamelCase ) == poly(_lowerCamelCase ):
x_val += 1
ret += poly(_lowerCamelCase )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
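# Illustrative check of the Gaussian-elimination routine above (intended behaviour, using the
# original solve(matrix, vector) signature): for the system
#   x + y  = 3
#   x + 2y = 5
# solve([[1, 1], [1, 2]], [[3], [5]]) is meant to return [[1.0], [2.0]], i.e. x = 1, y = 2.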
| 711 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
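# Note: the graph/session/placeholder API used above is TensorFlow 1.x; under TF 2.x it would
# typically need the compat shim, e.g.
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()
# Sketch of a call with toy 2-D points, assuming the function keeps its original
# TFKMeansCluster name:
#   centroids, assignments = TFKMeansCluster(
#       [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.9, 8.2]], 2
#   )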
| 663 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
@register_to_config
def __init__( self , a__ = 768 , ):
super().__init__()
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.zeros(1 , a__ ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.ones(1 , a__ ) )
def __A ( self , a__ = None , a__ = None , ):
_lowerCAmelCase : Any = nn.Parameter(self.mean.to(a__ ).to(a__ ) )
_lowerCAmelCase : Any = nn.Parameter(self.std.to(a__ ).to(a__ ) )
return self
def __A ( self , a__ ):
_lowerCAmelCase : Union[str, Any] = (embeds - self.mean) * 1.0 / self.std
return embeds
def __A ( self , a__ ):
_lowerCAmelCase : Tuple = (embeds * self.std) + self.mean
return embeds
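# Usage sketch (intent of the two transforms above, scale()/unscale() in the original naming):
# given prior-produced embeddings `emb`, the first whitens them with the stored mean/std and
# the second inverts that, so unscale(scale(emb)) should reproduce emb up to floating-point
# error; the to(device, dtype) helper just moves the mean/std buffers.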
| 712 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : List[str] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 0 |
"""simple docstring"""
import numpy
class __A :
def __init__( self , a__ , a__ ):
_lowerCAmelCase : int = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_lowerCAmelCase : Union[str, Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_lowerCAmelCase : List[str] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_lowerCAmelCase : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
_lowerCAmelCase : Union[str, Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_lowerCAmelCase : int = numpy.zeros(output_array.shape )
def __A ( self ):
_lowerCAmelCase : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_lowerCAmelCase : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_lowerCAmelCase : Tuple = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __A ( self ):
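        # Back-propagate the squared-error gradient through both hidden layers
        # (chain rule) and compute the corresponding weight updates below.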
_lowerCAmelCase : str = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_lowerCAmelCase : Tuple = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_lowerCAmelCase : Optional[Any] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __A ( self , a__ , a__ , a__ ):
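        # One feedforward + back-propagation cycle per iteration; when give_loss
        # is truthy, the mean squared error is printed after each update.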
for iteration in range(1 , iterations + 1 ):
_lowerCAmelCase : Any = self.feedforward()
self.back_propagation()
if give_loss:
_lowerCAmelCase : Union[str, Any] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"Iteration {iteration} Loss: {loss}" )
def __A ( self , a__ ):
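        # Single forward pass on the supplied input; the final sigmoid output is
        # thresholded at 0.6 to produce a binary (0/1) prediction.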
_lowerCAmelCase : str = input_arr
_lowerCAmelCase : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_lowerCAmelCase : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_lowerCAmelCase : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : numpy.ndarray ) -> numpy.ndarray:
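    # Logistic sigmoid activation: squashes any real value into the open interval (0, 1).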
return 1 / (1 + numpy.exp(-value ))
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : numpy.ndarray ) -> numpy.ndarray:
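    # Derivative of the sigmoid expressed in terms of an already-activated value: s * (1 - s).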
return (value) * (1 - (value))
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : List[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) ,dtype=numpy.floataa ,)
# True output values for the given input values.
_lowerCAmelCase : Optional[int] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) ,dtype=numpy.floataa )
# Calling neural network class.
_lowerCAmelCase : str = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase ,output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase ,iterations=10 ,give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) ,dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 714 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 0 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
class __A :
_UpperCamelCase : str
_UpperCamelCase : str = None
@staticmethod
def __A ( ):
raise NotImplementedError
def __A ( self , a__ , a__ , a__ , **a__ ):
raise NotImplementedError
def __A ( self , a__ ):
raise NotImplementedError
def __A ( self ):
if not self.is_available():
raise RuntimeError(
F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
@classmethod
def __A ( cls ):
return F"`pip install {cls.pip_package or cls.name}`"
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = "optuna"
@staticmethod
def __A ( ):
return is_optuna_available()
def __A ( self , a__ , a__ , a__ , **a__ ):
return run_hp_search_optuna(a__ , a__ , a__ , **a__ )
def __A ( self , a__ ):
return default_hp_space_optuna(a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = "ray"
_UpperCamelCase : Tuple = "'ray[tune]'"
@staticmethod
def __A ( ):
return is_ray_available()
def __A ( self , a__ , a__ , a__ , **a__ ):
return run_hp_search_ray(a__ , a__ , a__ , **a__ )
def __A ( self , a__ ):
return default_hp_space_ray(a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = "sigopt"
@staticmethod
def __A ( ):
return is_sigopt_available()
def __A ( self , a__ , a__ , a__ , **a__ ):
return run_hp_search_sigopt(a__ , a__ , a__ , **a__ )
def __A ( self , a__ ):
return default_hp_space_sigopt(a__ )
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = "wandb"
@staticmethod
def __A ( ):
return is_wandb_available()
def __A ( self , a__ , a__ , a__ , **a__ ):
return run_hp_search_wandb(a__ , a__ , a__ , **a__ )
def __A ( self , a__ ):
return default_hp_space_wandb(a__ )
_a : Tuple = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def SCREAMING_SNAKE_CASE ( ) -> str:
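    # Pick the first installed hyperparameter-search backend, log an informational
    # message when several are available, and raise if none is installed.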
_lowerCAmelCase : str = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Optional[Any] = available_backends[0].name
if len(_lowerCamelCase ) > 1:
logger.info(
f"{len(_lowerCamelCase )} hyperparameter search backends available. Using {name} as the default." )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
f" - To install {backend.name} run {backend.pip_install()}"
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 716 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : float ,_lowerCamelCase : float ) -> dict[str, float]:
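    # Ohm's law (V = I * R): exactly one of the three arguments must be 0,
    # and that missing quantity is computed from the other two.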
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 0 |