'''Streamlit demo app: long-form question answering with ELI5 (dense/sparse retriever + seq2seq generator).'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)  # noqa: E741
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
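# A minimal sketch of how these pieces compose outside of the Streamlit widgets below
# (illustrative only; it assumes the models and indexes above loaded successfully):
#
#   question_doc, support_list = make_support("How do planes fly?", source="wiki40b", method="dense")
#   answer, _ = answer_question(question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256)
#
# make_support builds a "question: ... context: ..." string from retrieved passages, and
# answer_question conditions the seq2seq generator on that string.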
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
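# Rough outline of what the two retrieval modes do under the hood (an assumption-laden
# sketch, not the exact eli5_utils implementation):
#
#   dense:  q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
#           scores, ids = gpu_dense_index.search(q_rep, n_results)   # faiss max inner product
#   sparse: es_client.search(index="english_wiki40b_snippets_100w",
#                            body={"query": {"match": {"passage_text": question}}})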
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None

generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
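# For orientation, the two decoding modes correspond roughly to these transformers
# `generate` calls (a sketch; the demo actually routes through qa_s2s_generate):
#
#   model.generate(**inputs, num_beams=n_beams, min_length=min_len, max_length=max_len)   # beam
#   model.generate(**inputs, do_sample=True, top_p=top_p, temperature=temp,
#                  min_length=min_len, max_length=max_len)                                # sampled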
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
'''Benchmark arguments for TensorFlow models.'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
    import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        """Legacy __init__: maps deprecated `no_*` arguments onto their positive counterparts."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)
    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."
        },
    )
    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy
    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
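# Minimal usage sketch (hedged: `models`, `batch_sizes` and `sequence_lengths` come from
# the parent BenchmarkArguments and may differ across library versions):
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   print(args.strategy)   # OneDeviceStrategy on CPU/GPU, or a TPUStrategy when a TPU resolves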
'''T5-style encoder for note sequences, built from transformers T5 blocks.'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
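# Illustrative forward pass with toy hyperparameters (a sketch only; the real pipeline
# supplies its own vocabulary sizes and tokenized note sequences):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=12, num_heads=12, d_kv=64, d_ff=2048, feed_forward_proj="gated-gelu",
#   )
#   tokens = torch.randint(0, 1536, (1, 2048))
#   mask = torch.ones(1, 2048, dtype=torch.long)
#   hidden, mask = encoder(encoder_input_tokens=tokens, encoder_inputs_mask=mask)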
'''ViT-MSN model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
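# Usage sketch (the model class is assumed to be available from transformers):
#
#   config = ViTMSNConfig(image_size=224, patch_size=16)   # ViT-base-like defaults
#   # from transformers import ViTMSNModel
#   # model = ViTMSNModel(config)   # randomly initialized weights with this configuration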
'''Distributed-ops smoke test for accelerate: gather, broadcast, pad and reduce collectives.'''
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    # The main process gets one extra element so that padding on the others is observable
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
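# To exercise the collectives across several processes, launch with something like
# (illustrative command; the script filename is an assumption):
#
#   accelerate launch --num_processes 2 test_ops.py
#
# With a single process most collectives are trivial no-ops, so the assertions are only
# interesting under a multi-process launch.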
'''Simplified DES (S-DES): a toy 8-bit block cipher with two 8-bit round keys.'''
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data):
    return data[1:] + data[0]
def xor(a, b):
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
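# Structure of the cipher, for orientation: S-DES encrypts as
#   IP -> f_K1 -> swap halves -> f_K2 -> IP^-1
# and, because each Feistel round is its own inverse under the same subkey, decryption
# simply replays the rounds with the subkeys in reverse order (K2 then K1), as above.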
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
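# Example invocation (illustrative paths and script name):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /tmp/gpt2/model.ckpt \
#       --pytorch_dump_folder_path /tmp/gpt2-pytorch
#
# The script writes the weights (WEIGHTS_NAME, i.e. pytorch_model.bin) and the
# configuration (CONFIG_NAME, i.e. config.json) into the dump folder.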
'''Benchmark of datasets.Dataset.map and .filter over a synthetic dataset.'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
'''Tests for the TensorFlow Transfo-XL model.'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )
class TFTransfoXLModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}

        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO: Make Transfo-XL XLA compliant
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
'''K-means clustering implemented with (TF1-style) TensorFlow ops.'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
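# Usage sketch. Note this relies on TF1-era APIs (tf.Session, tf.placeholder, tf.sub,
# tf.initialize_all_variables), so it only runs on TensorFlow 1.x or through
# tf.compat.v1 with eager execution disabled:
#
#   from numpy import array
#   data = array([[1.0, 1.0], [1.1, 0.9], [5.0, 5.0], [5.2, 4.8]])
#   centroids, assignments = TFKMeansCluster(data, 2)
#   # the two tight pairs above should land in separate clusters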
'''Tests for make_student.create_student_by_copying_alternating_layers.'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
'''Largest square of 1s in a binary matrix: recursion, memoization, bottom-up DP, and space-optimized DP.'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # keep the just-computed row for the next (upper) row, and start fresh
        next_row = current_row
        current_row = [0] * (cols + 1)

    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
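# Worked example of the bottom-up recurrence on the 2x2 all-ones matrix above: scanning
# from the bottom-right, dp[1][1], dp[1][0] and dp[0][1] each become 1 + min(0, 0, 0) = 1,
# and dp[0][0] = 1 + min(dp[0][1], dp[1][1], dp[1][0]) = 1 + 1 = 2, so the call prints 2.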
'''Check whether a non-negative integer is a power of two via the n & (n - 1) bit trick.'''


def is_power_of_two(number: int) -> bool:
    # Note: by this check, 0 is also reported as a power of two.
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
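# Why the trick works: a power of two has exactly one set bit, e.g. 8 = 0b1000, and
# subtracting one flips that bit and sets every lower bit: 7 = 0b0111. The AND of the two
# is therefore 0 exactly for powers of two (and for 0, which this check also accepts):
#
#   assert is_power_of_two(8) and not is_power_of_two(6)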
'''GPTBigCode model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
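# Usage sketch with SantaCoder-style dimensions and multi-query attention enabled
# (the model class is assumed to be available from transformers):
#
#   config = GPTBigCodeConfig(n_layer=12, n_head=12, n_embd=768, multi_query=True)
#   # from transformers import GPTBigCodeForCausalLM
#   # model = GPTBigCodeForCausalLM(config)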
'''Build short, reversible names for hyperparameter trials (TrialShortNamer).'''
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name
    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info
    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO["short_param"][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)

        return "_".join(name)
    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))

            key = cls.NAMING_INFO["reverse_short_param"][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
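# Usage sketch (illustrative defaults):
#
#   TrialShortNamer.set_defaults("hp", {"learning_rate": 0.1, "num_train_epochs": 3})
#   name = TrialShortNamer.shortname({"learning_rate": 0.5, "num_train_epochs": 3})
#   # -> "hp_lr0.5": only non-default values are encoded, using per-word short keys
#   params = TrialShortNamer.parse_repr(name)   # round-trips back to the full dict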
'''Send a message to a Slack channel through an incoming webhook.'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
'''Project Euler 55: count Lychrel candidates below a limit, i.e. numbers that do not
produce a palindrome within 50 reverse-and-add iterations.'''


def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
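# Worked reverse-and-add example: 349 reaches a palindrome in three iterations,
#   349 + 943 = 1292;  1292 + 2921 = 4213;  4213 + 3124 = 7337,
# so 349 is not a Lychrel candidate. 196 is the most famous number that never seems to
# settle, which is why the search is capped at 50 iterations per number.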
'''Convert MobileViT checkpoints from the original repository to the transformers format.'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
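# Conversion flow: build a MobileViTConfig for the requested variant, rename the
# original (ml-cvnets style) checkpoint keys to the Transformers layout, then
# verify logits on a reference image before saving / pushing to the hub.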
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("""deeplabv3_"""):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""

    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if F"""layer_{i}.""" in name:
            name = name.replace(F"""layer_{i}.""", F"""encoder.layer.{i - 1}.""")

    if "conv_1." in name:
        name = name.replace("""conv_1.""", """conv_stem.""")
    if ".block." in name:
        name = name.replace(""".block.""", """.""")
    if "exp_1x1" in name:
        name = name.replace("""exp_1x1""", """expand_1x1""")
    if "red_1x1" in name:
        name = name.replace("""red_1x1""", """reduce_1x1""")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(""".local_rep.conv_3x3.""", """.conv_kxk.""")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(""".local_rep.conv_1x1.""", """.conv_1x1.""")
    if ".norm." in name:
        name = name.replace(""".norm.""", """.normalization.""")
    if ".conv." in name:
        name = name.replace(""".conv.""", """.convolution.""")
    if ".conv_proj." in name:
        name = name.replace(""".conv_proj.""", """.conv_projection.""")

    for i in range(0, 2):
        for j in range(0, 4):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""", F""".{i}.layer.{j}.""")

    for i in range(2, 6):
        for j in range(0, 4):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""", F""".{i}.""")
                if "expand_1x1" in name:
                    name = name.replace("""expand_1x1""", """downsampling_layer.expand_1x1""")
                if "conv_3x3" in name:
                    name = name.replace("""conv_3x3""", """downsampling_layer.conv_3x3""")
                if "reduce_1x1" in name:
                    name = name.replace("""reduce_1x1""", """downsampling_layer.reduce_1x1""")

    for i in range(2, 5):
        if F""".global_rep.{i}.weight""" in name:
            name = name.replace(F""".global_rep.{i}.weight""", """.layernorm.weight""")
        if F""".global_rep.{i}.bias""" in name:
            name = name.replace(F""".global_rep.{i}.bias""", """.layernorm.bias""")

    if ".global_rep." in name:
        name = name.replace(""".global_rep.""", """.transformer.""")
    if ".pre_norm_mha.0." in name:
        name = name.replace(""".pre_norm_mha.0.""", """.layernorm_before.""")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(""".pre_norm_mha.1.out_proj.""", """.attention.output.dense.""")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(""".pre_norm_ffn.0.""", """.layernorm_after.""")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(""".pre_norm_ffn.1.""", """.intermediate.dense.""")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(""".pre_norm_ffn.4.""", """.output.dense.""")
    if ".transformer." in name:
        name = name.replace(""".transformer.""", """.transformer.layer.""")

    if ".aspp_layer." in name:
        name = name.replace(""".aspp_layer.""", """.""")
    if ".aspp_pool." in name:
        name = name.replace(""".aspp_pool.""", """.""")
    if "seg_head." in name:
        name = name.replace("""seg_head.""", """segmentation_head.""")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("""segmentation_head.classifier.classifier.""", """segmentation_head.classifier.""")

    if "classifier.fc." in name:
        name = name.replace("""classifier.fc.""", """classifier.""")
    elif (not base_model) and ("segmentation_head." not in name):
        name = """mobilevit.""" + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(""".""")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
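# The converted model is sanity-checked below against logit slices recorded from
# the original implementation for each supported variant.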
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="""cpu""")

    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_"""):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241])
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587])
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653])
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
        model_mapping = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="""apple""")
        model.push_to_hub(model_name, organization="""apple""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
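# Tests for the consistency-models scheduler: step shapes, config overrides,
# deterministic full denoising loops, and validation of custom timestep lists.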
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1E-2
        assert abs(result_mean.item() - 0.2510) < 1E-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1E-2
        assert abs(result_mean.item() - 0.4527) < 1E-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="""`timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
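# The model tester builds a tiny SwiftFormer config so the common tests run
# quickly; the integration test at the end uses the real MBZUAI/swiftformer-xs weights.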
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="""gelu""",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1E-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="""SwiftFormer does not output attentions""")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1E-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9) / 1E9).round().item(),
                        [0.0, 1.0],
                        msg=F"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def _a(self):
        pass
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
'''simple docstring'''
from __future__ import annotations
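# Project Euler problem 35: count circular primes below one million. A sieve of
# Eratosthenes is precomputed so every rotation check is an O(1) lookup.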
seive = [True] * 100_0001
i = 2
while i * i <= 100_0000:
    if seive[i]:
        for j in range(i * i, 100_0001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in """02468""" for digit in str(n))


def find_circular_primes(limit: int = 100_0000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
print(f'''{len(find_circular_primes()) = }''')
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
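# Thin launcher around TensorFlowBenchmark that also translates the deprecated
# `--no_*` flags into a helpful error message pointing at the `--no-*` spelling.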
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e).split(""" """)[:-1])
        full_error_msg = """"""
        depreciated_args = eval(str(e).split(""" """)[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
'''simple docstring'''
from __future__ import annotations
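# Ohm's law solver: exactly one of voltage, current or resistance must be passed
# as 0 (the unknown); the function returns the missing quantity.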
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
'''simple docstring'''
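# Persistence of a number: how many times its digits must be multiplied
# (multiplicative) or summed (additive) together until a single digit remains.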
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spm_char.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
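# Character-level SentencePiece tokenizer for SpeechT5; an EOS token is appended
# to every sequence and pair handling is kept only for API consistency.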
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None) -> List[int]:
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_b is None:
            return ([0] * len(token_ids_a)) + suffix_ones
        return ([0] * len(token_ids_a)) + ([0] * len(token_ids_b)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
'''simple docstring'''
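# Project Euler problem 40: build Champernowne's constant (0.123456789101112...)
# and multiply the digits at positions 1, 10, 100, ..., 1000000.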
def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    constant = """""".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )
if __name__ == "__main__":
print(solution())
'''simple docstring'''
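# Recursive search that inspects both ends of the list and narrows the window
# by one element per side on each call; returns -1 when the key is absent.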
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
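# Project Euler problem 13 style task: sum the integers listed one per line in
# num.txt (located next to this file) and keep the first ten digits.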
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), """num.txt""")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
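# Randomized in-place quicksort instrumented to count element comparisons; the
# demo below sorts 100 samples drawn from a standard normal distribution.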
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
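# Fast tests run a tiny UNet/VAE/CLIP stack on CPU; the slow tests below exercise
# the real stable-diffusion-2-base weights on GPU.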
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25E-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = """french fries"""
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_euler(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="""scaled_linear"""
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    def test_stable_diffusion_panorama_pndm(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["""scheduler"""] = PNDMScheduler(
            beta_start=0.0_0085, beta_end=0.012, beta_schedule="""scaled_linear""", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            """prompt""": """a photo of the dolomites""",
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = """stabilityai/stable-diffusion-2-base"""
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="""scheduler""")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-base""", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False

        model_ckpt = """stabilityai/stable-diffusion-2-base"""
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="""scheduler""")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = """stabilityai/stable-diffusion-2-base"""
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="""scheduler""")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
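# End-to-end driver for seq2seq fine-tuning (summarization / translation):
# parses ModelArguments / DataTrainingArguments / Seq2SeqTrainingArguments,
# builds datasets, and runs train / eval / predict with Seq2SeqTrainer.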
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''},
    )
    freeze_encoder: bool = field(default=False, metadata={'''help''': '''Whether to freeze the encoder.'''})
    freeze_embeds: bool = field(default=False, metadata={'''help''': '''Whether to freeze the embeddings.'''})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''}
    )
    task: Optional[str] = field(
        default='''summarization''',
        metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            '''help''': (
                '''The maximum total sequence length for target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            '''help''': (
                '''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded. '''
                '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
                '''during ``evaluate`` and ``predict``.'''
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            '''help''': (
                '''The maximum total sequence length for test target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={'''help''': '''# training examples. -1 means use all.'''})
    n_val: Optional[int] = field(default=-1, metadata={'''help''': '''# validation examples. -1 means use all.'''})
    n_test: Optional[int] = field(default=-1, metadata={'''help''': '''# test examples. -1 means use all.'''})
    src_lang: Optional[str] = field(default=None, metadata={'''help''': '''Source language id for translation.'''})
    tgt_lang: Optional[str] = field(default=None, metadata={'''help''': '''Target language id for translation.'''})
    eval_beams: Optional[int] = field(default=None, metadata={'''help''': '''# num_beams to use for evaluation.'''})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''},
    )
def handle_metrics(split, metrics, output_dir):
    # Log the metrics and save them to a json file in output_dir.
    logger.info(F"""***** {split} metrics *****""")
    for key in sorted(metrics.keys()):
        logger.info(F"""  {key} = {metrics[key]}""")
    save_json(metrics, os.path.join(output_dir, F"""{split}_results.json"""))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("""Training/evaluation parameters %s""", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=""".ckpt""" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="""train""",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or """""",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="""val""",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or """""",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="""test""",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or """""",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
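# Hypothetical invocation of this training script (script name, paths and model
# id are illustrative, not part of this file; the flags match the dataclasses
# parsed above):
# python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#     --output_dir ./out --task summarization --do_train --do_eval \
#     --predict_with_generate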
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 653 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : Tuple = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
lowercase_ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowercase = model_type_to_module_name(_lowerCAmelCase )
lowercase = importlib.import_module(F""".{module_name}""" , """transformers.models""" )
try:
return getattr(_lowerCAmelCase , _lowerCAmelCase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowerCAmelCase , """__name__""" , _lowerCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowercase = importlib.import_module("""transformers""" )
if hasattr(_lowerCAmelCase , _lowerCAmelCase ):
return getattr(_lowerCAmelCase , _lowerCAmelCase )
return None
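# e.g. (illustrative call, not part of the original file): passing
# "ViTFeatureExtractor" resolves transformers.models.vit via the mapping above
# and returns its ViTFeatureExtractor class.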
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : List[Any] = None , lowercase_ : Optional[Any] = False , lowercase_ : int = False , lowercase_ : Union[str, Any] = None , lowercase_ : List[str] = None , lowercase_ : Dict = None , lowercase_ : Tuple = False , **lowercase_ : Dict , ):
lowercase = get_file_from_repo(
_lowerCAmelCase , _lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , resume_download=_lowerCAmelCase , proxies=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , local_files_only=_lowerCAmelCase , )
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(_lowerCAmelCase , encoding="""utf-8""" ) as reader:
return json.load(_lowerCAmelCase )
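# Hypothetical usage of the helper above (get_feature_extractor_config in the
# original library; the model id is illustrative):
# cfg = get_feature_extractor_config("facebook/wav2vec2-base")
# cfg.get("feature_extractor_type")  # e.g. "Wav2Vec2FeatureExtractor"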
class __UpperCamelCase :
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_lowerCamelCase )
def _a ( cls , _lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase = kwargs.pop("""config""" , _lowerCamelCase )
lowercase = kwargs.pop("""trust_remote_code""" , _lowerCamelCase )
lowercase = True
lowercase , lowercase = FeatureExtractionMixin.get_feature_extractor_dict(_lowerCamelCase , **_lowerCamelCase )
lowercase = config_dict.get("""feature_extractor_type""" , _lowerCamelCase )
lowercase = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
lowercase = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase = AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
            # It could be in `config.feature_extractor_type`
lowercase = getattr(_lowerCamelCase , """feature_extractor_type""" , _lowerCamelCase )
if hasattr(_lowerCamelCase , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
lowercase = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
lowercase = feature_extractor_class_from_name(_lowerCamelCase )
lowercase = feature_extractor_auto_map is not None
lowercase = feature_extractor_class is not None or type(_lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING
lowercase = resolve_trust_remote_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if has_remote_code and trust_remote_code:
lowercase = get_class_from_dynamic_module(
_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
lowercase = kwargs.pop("""code_revision""" , _lowerCamelCase )
if os.path.isdir(_lowerCamelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_lowerCamelCase ) in FEATURE_EXTRACTOR_MAPPING:
lowercase = FEATURE_EXTRACTOR_MAPPING[type(_lowerCamelCase )]
return feature_extractor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def _a ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
'''simple docstring'''
FEATURE_EXTRACTOR_MAPPING.register(_lowerCamelCase , _lowerCamelCase )
| 701 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
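# Hypothetical usage sketch of the pipeline above (the model id is illustrative,
# not part of this file):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("cat.png")  # -> [{"generated_text": "..."}]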
| 653 | 0 |
import heapq
import sys
import numpy as np
lowercase_ : int = tuple[int, int]
class __UpperCamelCase :
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = set()
def _a ( self ) -> int:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float("""inf""" )
def _a ( self ) -> List[Any]:
'''simple docstring'''
return len(self.elements ) == 0
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__lowerCamelCase )
else:
# update
# print("update", item)
lowercase = []
            lowercase , lowercase = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
                lowercase , lowercase = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _a ( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
if item in self.set:
self.set.remove(__lowerCamelCase )
lowercase = []
            lowercase , lowercase = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
                lowercase , lowercase = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.elements[0][1]
def _a ( self ) -> Any:
'''simple docstring'''
        lowercase , lowercase = heapq.heappop(self.elements )
self.set.remove(__lowerCamelCase )
return (priority, item)
def SCREAMING_SNAKE_CASE ( lowercase_ : TPos , lowercase_ : TPos ):
lowercase = np.array(lowerCamelCase_ )
lowercase = np.array(lowerCamelCase_ )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE ( lowercase_ : TPos , lowercase_ : TPos ):
return consistent_heuristic(lowerCamelCase_ , lowerCamelCase_ ) // t
def SCREAMING_SNAKE_CASE ( lowercase_ : TPos , lowercase_ : TPos ):
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
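# Illustrative values for the heuristics above (hypothetical, not in the source):
#   Euclidean distance:  (0, 0) vs (3, 4) -> 5.0
#   Manhattan distance:  (0, 0) vs (3, 4) -> 3 + 4 = 7
# The second heuristic floor-divides the Euclidean distance by the global
# counter t, which makes it deliberately inconsistent for the multi-heuristic
# search below.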
def SCREAMING_SNAKE_CASE ( lowercase_ : TPos , lowercase_ : int , lowercase_ : TPos , lowercase_ : dict[TPos, float] ):
lowercase = g_function[start] + Wa * heuristics[i](lowerCamelCase_ , lowerCamelCase_ )
return ans
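# The priority above is the weighted-A* key f_i(s) = g(s) + W1 * h_i(s, goal);
# with W1 = 1 it reduces to plain A*, while larger W1 trades optimality for
# faster expansion.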
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ):
lowercase = np.chararray((n, n) )
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
lowercase = '''*'''
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
if (j, (n - 1) - i) in blocks:
lowercase = '''#'''
lowercase = '''-'''
lowercase = back_pointer[goal]
while x != start:
        lowercase , lowercase = x
# print(x)
lowercase = '''-'''
lowercase = back_pointer[x]
lowercase = '''-'''
for i in range(lowerCamelCase_ ):
for j in range(lowerCamelCase_ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=""" """ )
print("""<-- End position""" , end=""" """ )
else:
print(grid[i][j] , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
print("""PATH TAKEN BY THE ALGORITHM IS:-""" )
lowercase = back_pointer[goal]
while x != start:
print(lowerCamelCase_ , end=""" """ )
lowercase = back_pointer[x]
print(lowerCamelCase_ )
sys.exit()
def SCREAMING_SNAKE_CASE ( lowercase_ : TPos ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , ):
for itera in range(lowerCamelCase_ ):
open_list[itera].remove_element(lowerCamelCase_ )
# print("s", s)
# print("j", j)
    lowercase , lowercase = s
lowercase = (x - 1, y)
lowercase = (x + 1, y)
lowercase = (x, y + 1)
lowercase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCamelCase_ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCamelCase_ )
lowercase = -1
lowercase = float("""inf""" )
if valid(lowerCamelCase_ ) and g_function[neighbours] > g_function[s] + 1:
lowercase = g_function[s] + 1
lowercase = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCamelCase_ , key(lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCamelCase_ ):
if key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) <= Wa * key(
lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ ):
open_list[j].put(
lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) )
def SCREAMING_SNAKE_CASE ( ):
lowercase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowercase_ : Dict = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowercase_ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
lowercase_ : Optional[int] = make_common_ground()
lowercase_ : Optional[int] = blocks_blk
# hyperparameters
lowercase_ : List[Any] = 1
lowercase_ : str = 1
lowercase_ : Tuple = 20
lowercase_ : int = 3 # one consistent and two other inconsistent
# start and end destination
lowercase_ : Optional[int] = (0, 0)
lowercase_ : Optional[int] = (n - 1, n - 1)
lowercase_ : Union[str, Any] = 1
def SCREAMING_SNAKE_CASE ( lowercase_ : TPos , lowercase_ : TPos , lowercase_ : int ):
lowercase = {start: 0, goal: float("""inf""" )}
lowercase = {start: -1, goal: -1}
lowercase = []
lowercase = set()
for i in range(lowerCamelCase_ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCamelCase_ , key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) )
lowercase = []
lowercase = []
while open_list[0].minkey() < float("""inf""" ):
for i in range(1 , lowerCamelCase_ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
lowercase = open_list[i].top_show()
visited.add(lowerCamelCase_ )
expand_state(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
close_list_inad.append(lowerCamelCase_ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("""inf""" ):
do_something(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
lowercase = open_list[0].top_show()
visited.add(lowerCamelCase_ )
expand_state(
lowerCamelCase_ , 0 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
close_list_anchor.append(lowerCamelCase_ )
print("""No path found to goal""" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCamelCase_ ):
if (j, i) in blocks:
print("""#""" , end=""" """ )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("""*""" , end=""" """ )
else:
print("""-""" , end=""" """ )
else:
print("""*""" , end=""" """ )
if (j, i) == (n - 1, n - 1):
print("""<-- End position""" , end=""" """ )
print()
print("""^""" )
print("""Start position""" )
print()
print("""# is an obstacle""" )
print("""- is the path taken by algorithm""" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 702 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
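# Hypothetical usage of the config class above (NezhaConfig in the original
# library); the values shown are simply the defaults from __init__:
# config = NezhaConfig()
# (config.hidden_size, config.max_relative_position)  # -> (768, 64)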
| 653 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : List[Any] ):
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance == 0:
return {"resistance": sqrt(pow(UpperCamelCase__ , 2 ) - pow(UpperCamelCase__ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(UpperCamelCase__ , 2 ) - pow(UpperCamelCase__ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(UpperCamelCase__ , 2 ) + pow(UpperCamelCase__ , 2 ) )}
else:
raise ValueError("""Exactly one argument must be 0""" )
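# Worked example (hypothetical values, not part of the original file): a series
# circuit with R = 3 ohm and X = 4 ohm has |Z| = sqrt(3**2 + 4**2) = 5 ohm, so
# calling the function above with impedance=0 returns {"impedance": 5.0}.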
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = git.Repo(search_parent_directories=lowercase_ )
lowercase = {
"""repo_id""": str(lowercase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
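# The git_log.json written above looks roughly like this (values illustrative):
# {"repo_id": "/path/to/checkout", "repo_sha": "0a1b2c3d...", "repo_branch": "main"}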
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
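    # Worked example (hypothetical): 2 nodes x 4 GPUs give world_size=8 and
    # n_gpu_per_node=4, hence n_nodes = 8 // 4 = 2; the process with
    # global_rank=5 computes node_id = 5 // 4 = 1, i.e. it lives on the second
    # node, with its local_rank taken from the launcher environment.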
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(__SCREAMING_SNAKE_CASE , int(b / 2 ) ) * actual_power(__SCREAMING_SNAKE_CASE , int(b / 2 ) )
else:
return a * actual_power(__SCREAMING_SNAKE_CASE , int(b / 2 ) ) * actual_power(__SCREAMING_SNAKE_CASE , int(b / 2 ) )
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Dict ):
if b < 0:
return 1 / actual_power(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return actual_power(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
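# Sanity checks for the two functions above (hypothetical, not in the source):
#   power(2, 3)  -> 8
#   power(5, 0)  -> 1
#   power(2, -3) -> 0.125  (negative exponents return the reciprocal)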
if __name__ == "__main__":
print(power(-2, -3))
| 704 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
lowercase_ : List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase_ : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
lowercase_ : Union[str, Any] = os.environ.get('''USER_TOKEN''', '''''')
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowercase_ , headers=lowercase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = {}
lowercase = tokenizer(example["""content"""] , truncation=lowercase_ )["""input_ids"""]
lowercase = len(example["""content"""] ) / len(output["""input_ids"""] )
return output
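# Example with hypothetical numbers: a 1_200-character file that tokenizes into
# 400 ids yields a character-to-token ratio of 3.0; unusually low ratios can
# flag files the tokenizer handles poorly.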
lowercase_ : str = HfArgumentParser(PretokenizationArguments)
lowercase_ : int = parser.parse_args()
if args.num_workers is None:
lowercase_ : Union[str, Any] = multiprocessing.cpu_count()
lowercase_ : Dict = AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowercase_ : str = time.time()
lowercase_ : int = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
lowercase_ : Union[str, Any] = time.time()
lowercase_ : int = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
lowercase_ : Any = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 705 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ):
lowercase = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
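# Example with hypothetical numbers: a 30 s clip at 16 kHz holds 480_000
# samples; with max_length=20.0 the helper returns a random contiguous window
# of round(16_000 * 20.0) = 320_000 samples, and shorter clips pass through
# unchanged.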
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
            '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder` """
                """instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`. """
                """Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowercase = DatasetDict()
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ : int ):
lowercase = []
for audio in batch[data_args.audio_column_name]:
lowercase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ : Dict ):
lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
| 653 | 0 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase (__lowercase ):
def _a ( self ) -> List[str]:
lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """num_heads""" ) )
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=64 , _lowerCAmelCase=3 , _lowerCAmelCase=[16, 48, 96] , _lowerCAmelCase=[1, 3, 6] , _lowerCAmelCase=[1, 2, 10] , _lowerCAmelCase=[7, 3, 3] , _lowerCAmelCase=[4, 2, 2] , _lowerCAmelCase=[2, 1, 1] , _lowerCAmelCase=[2, 2, 2] , _lowerCAmelCase=[False, False, True] , _lowerCAmelCase=[0.0, 0.0, 0.0] , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=2 , ) -> Optional[Any]:
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = patch_sizes
lowercase = patch_stride
lowercase = patch_padding
lowercase = is_training
lowercase = use_labels
lowercase = num_labels
lowercase = num_channels
lowercase = embed_dim
lowercase = num_heads
lowercase = stride_kv
lowercase = depth
lowercase = cls_token
lowercase = attention_drop_rate
lowercase = initializer_range
lowercase = layer_norm_eps
def _a ( self ) -> int:
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Dict:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
lowercase = CvtModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
lowercase = (self.image_size, self.image_size)
        lowercase , lowercase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
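        # Worked example (illustrative): with image_size=64, patch_size=7,
        # stride=4 and padding=2 the first stage gives
        # floor((64 + 2 * 2 - 7) / 4 + 1) = 16, i.e. a 4x downsampling that
        # matches the image_size // 4 shape asserted in the hidden-states test
        # later in this file.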
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
lowercase = self.num_labels
lowercase = CvtForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> int:
lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (__lowercase , __lowercase , unittest.TestCase ):
__A = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
lowercase = CvtModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ) -> Tuple:
return
@unittest.skip(reason="""Cvt does not output attentions""" )
def _a ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def _a ( self ) -> Dict:
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def _a ( self ) -> int:
pass
def _a ( self ) -> Union[str, Any]:
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = len(self.model_tester.depth )
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
pass
@slow
def _a ( self ) -> Optional[Any]:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = CvtModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _a ( self ) -> int:
lowercase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 706 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
__A = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase = deprecated_arg[3:]
lowercase = not kwargs.pop(_lowerCAmelCase )
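                # e.g. (illustrative): a caller passing no_cuda=True ends up
                # with cuda=False here, since "no_cuda"[3:] == "cuda" and the
                # popped value is negated above.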
                logger.warning(
                    F"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
lowercase = kwargs.pop("""tpu_name""" , self.tpu_name )
lowercase = kwargs.pop("""device_idx""" , self.device_idx )
lowercase = kwargs.pop("""eager_mode""" , self.eager_mode )
lowercase = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**_lowerCAmelCase )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , )
__A = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def _a ( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
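# Hedged usage sketch (illustrative; assumes TensorFlow is installed and that
# the strategy property above is exposed as `args.strategy` upstream):
#
#     with args.strategy.scope():
#         model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
#         model.compile(optimizer="sgd", loss="mse")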
| 653 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowercase_ : Any = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict=None ):
lowercase = XLNetConfig.from_json_file(_SCREAMING_SNAKE_CASE )
lowercase = finetuning_task.lower() if finetuning_task is not None else """"""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase = finetuning_task
lowercase = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase = XLNetForSequenceClassification(_SCREAMING_SNAKE_CASE )
elif "squad" in finetuning_task:
lowercase = finetuning_task
lowercase = XLNetForQuestionAnswering(_SCREAMING_SNAKE_CASE )
else:
lowercase = XLNetLMHeadModel(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save pytorch-model
lowercase = os.path.join(_SCREAMING_SNAKE_CASE , WEIGHTS_NAME )
lowercase = os.path.join(_SCREAMING_SNAKE_CASE , CONFIG_NAME )
print(F"""Save PyTorch model to {os.path.abspath(_SCREAMING_SNAKE_CASE )}""" )
torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )
print(F"""Save configuration file to {os.path.abspath(_SCREAMING_SNAKE_CASE )}""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
lowercase_ : List[Any] = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
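# Example invocation (hedged: the script filename and all paths below are
# placeholders, not values from this repository):
#
#   python convert_xlnet_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/xlnet_model.ckpt \
#       --xlnet_config_file /path/to/xlnet_config.json \
#       --pytorch_dump_folder_path /path/to/output \
#       --finetuning_task sst-2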
| 707 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''vit_msn'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
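# Hedged standalone illustration (not part of the original config class): with
# the default image_size=224 and patch_size=16 above, a ViT-style encoder
# processes (224 // 16) ** 2 = 196 patch tokens plus one [CLS] token.
if __name__ == "__main__":
    example_image_size, example_patch_size = 224, 16
    print((example_image_size // example_patch_size) ** 2 + 1)  # 197 tokens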
| 653 | 0 |
'''simple docstring'''
class __UpperCamelCase :
def __init__( self ) -> Dict:
'''simple docstring'''
lowercase = """"""
lowercase = """"""
lowercase = []
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
lowercase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
lowercase = self.__min_dist_top_down_dp(_lowerCAmelCase , n - 1 )
lowercase = self.__min_dist_top_down_dp(m - 1 , _lowerCAmelCase )
lowercase = self.__min_dist_top_down_dp(m - 1 , n - 1 )
lowercase = 1 + min(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self.dp[m][n]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = worda
lowercase = worda
lowercase = [[-1 for _ in range(len(_lowerCAmelCase ) )] for _ in range(len(_lowerCAmelCase ) )]
return self.__min_dist_top_down_dp(len(_lowerCAmelCase ) - 1 , len(_lowerCAmelCase ) - 1 )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = worda
lowercase = worda
lowercase = len(_lowerCAmelCase )
lowercase = len(_lowerCAmelCase )
lowercase = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
lowercase = j
elif j == 0: # second string is empty
lowercase = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
lowercase = self.dp[i - 1][j - 1]
else:
lowercase = self.dp[i][j - 1]
lowercase = self.dp[i - 1][j]
lowercase = self.dp[i - 1][j - 1]
lowercase = 1 + min(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self.dp[m][n]
if __name__ == "__main__":
lowercase_ : Optional[int] = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
lowercase_ : Tuple = input('''Enter the first string: ''').strip()
lowercase_ : Optional[int] = input('''Enter the second string: ''').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
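# Hedged standalone cross-check (does not rely on the class above): a direct
# two-row bottom-up Levenshtein distance over the same recurrence.
def _reference_edit_distance(a: str, b: str) -> int:
    previous = list(range(len(b) + 1))  # distances from "" to prefixes of b
    for i, ca in enumerate(a, start=1):
        current = [i] + [0] * len(b)
        for j, cb in enumerate(b, start=1):
            if ca == cb:
                current[j] = previous[j - 1]
            else:
                current[j] = 1 + min(previous[j - 1], previous[j], current[j - 1])
        previous = current
    return previous[len(b)]

assert _reference_edit_distance("kitten", "sitting") == 3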
| 708 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : str ):
lowercase = """"""
for i in table:
res += inp[i - 1]
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
return data[1:] + data[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
lowercase = """"""
for i in range(len(lowercase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = int("""0b""" + data[0] + data[-1] , 2 )
lowercase = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any ):
lowercase = message[:4]
lowercase = message[4:]
lowercase = apply_table(lowercase_ , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
lowercase = apply_sbox(lowercase_ , temp[:4] ) # noqa: E741
lowercase = apply_sbox(lowercase_ , temp[4:] )
lowercase = """0""" * (2 - len(lowercase_ )) + l # noqa: E741
lowercase = """0""" * (2 - len(lowercase_ )) + r
lowercase = apply_table(l + r , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
return temp + right
if __name__ == "__main__":
lowercase_ : Tuple = input('''Enter 10 bit key: ''')
lowercase_ : Any = input('''Enter 8 bit message: ''')
lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowercase_ : List[Any] = [2, 4, 3, 1]
lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowercase_ : Union[str, Any] = apply_table(key, paa_table)
lowercase_ : Optional[Any] = temp[:5]
lowercase_ : int = temp[5:]
lowercase_ : List[str] = left_shift(left)
lowercase_ : int = left_shift(right)
lowercase_ : Tuple = apply_table(left + right, pa_table)
lowercase_ : List[str] = left_shift(left)
lowercase_ : Optional[Any] = left_shift(right)
lowercase_ : Union[str, Any] = left_shift(left)
lowercase_ : Union[str, Any] = left_shift(right)
lowercase_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
lowercase_ : int = apply_table(message, IP)
lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
lowercase_ : Any = temp[4:] + temp[:4]
lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
lowercase_ : Tuple = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowercase_ : List[str] = apply_table(CT, IP)
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = temp[4:] + temp[:4]
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
print('''Plain text after decrypting is:''', PT)
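# Hedged standalone restatement (illustrative) of the bitstring XOR used by
# the Feistel round above, with a quick sanity check.
def _xor_bits(a: str, b: str) -> str:
    # XOR two equal-length bitstrings character by character.
    return "".join("0" if x == y else "1" for x, y in zip(a, b))

assert _xor_bits("1010", "0110") == "1100"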
| 653 | 0 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
return 1 / (1 + np.exp(-z ))
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Optional[Any] ):
return (-y * np.log(lowerCAmelCase__ ) - (1 - y) * np.log(1 - h )).mean()
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Union[str, Any] ):
lowercase = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
return np.sum(y * scores - np.log(1 + np.exp(lowerCAmelCase__ ) ) )
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict=7_0000 ):
lowercase = np.zeros(x.shape[1] )
for iterations in range(lowerCAmelCase__ ):
lowercase = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = sigmoid_function(lowerCAmelCase__ )
lowercase = np.dot(x.T , h - y ) / y.size
lowercase = theta - alpha * gradient # updating the weights
lowercase = np.dot(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = sigmoid_function(lowerCAmelCase__ )
lowercase = cost_function(lowerCAmelCase__ , lowerCAmelCase__ )
if iterations % 100 == 0:
print(F"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
lowercase_ : Any = datasets.load_iris()
lowercase_ : List[Any] = iris.data[:, :2]
lowercase_ : Any = (iris.target != 0) * 1
lowercase_ : int = 0.1
lowercase_ : str = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing theta, i.e. our weights vector
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
return sigmoid_function(
np.dot(lowerCAmelCase__ , lowerCAmelCase__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((lowercase_) , (lowercase_)) : Optional[Any] = (x[:, 0].min(), x[:, 0].max())
((lowercase_) , (lowercase_)) : Tuple = (x[:, 1].min(), x[:, 1].max())
((lowercase_) , (lowercase_)) : List[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
lowercase_ : Optional[Any] = np.c_[xxa.ravel(), xxa.ravel()]
lowercase_ : Optional[Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
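# Hedged standalone restatement of the logistic sigmoid above, with two quick
# numerical checks: sigmoid(0) = 0.5 and the symmetry sigmoid(-z) = 1 - sigmoid(z).
def _sigmoid(z: float) -> float:
    return 1.0 / (1.0 + np.exp(-z))

assert abs(_sigmoid(0.0) - 0.5) < 1e-12
assert abs(_sigmoid(-2.0) - (1.0 - _sigmoid(2.0))) < 1e-12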
| 709 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase_ : int = 50_0000
lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__)
lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
lowercase = dataset.map(**lowercase_ )
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
lowercase = dataset.filter(**lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowercase = generate_example_dataset(
os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )
lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples["""text"""] )
lowercase = map(lowercase_ )
lowercase = map(lowercase_ , batched=lowercase_ )
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""numpy""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""pandas""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
lowercase = filter(lowercase_ )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , """wb""" ) as f:
f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
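# Hedged sketch of what a `get_duration`-style decorator usually looks like
# (the real one lives in the local `utils` module; this stand-in is only
# illustrative and is not that implementation).
import time
from functools import wraps

def _example_get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # elapsed wall-clock seconds
    return wrapper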
| 653 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : List[Any] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Dict = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowercase_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
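# Hedged usage note (illustrative): thanks to the _LazyModule above, heavy
# submodules are only imported on first attribute access, e.g.
#
#   from transformers import GPTBigCodeConfig   # resolves lazily
#   config = GPTBigCodeConfig(n_layer=2)        # plain eager construction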
| 710 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
# 'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
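# Hedged usage sketch (comment-only because the function above targets the
# legacy TensorFlow 1.x Session API; `kmeans` is a placeholder name for it):
#
#   vectors = [array([0.0, 0.0]), array([0.1, 0.0]),
#              array([5.0, 5.0]), array([5.1, 5.0])]
#   centroids, assignments = kmeans(vectors, noofclusters=2)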
| 653 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class __UpperCamelCase (lowercase_ ):
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if tokenize_kwargs is None:
lowercase = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"""truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" )
lowercase = truncation
lowercase = tokenize_kwargs
lowercase = {}
if return_tensors is not None:
lowercase = return_tensors
return preprocess_params, {}, postprocess_params
def _a ( self , _lowerCAmelCase , **_lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = self.framework
lowercase = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
return model_inputs
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model(**UpperCamelCase__ )
return model_outputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=False ) -> Any:
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
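# Hedged usage sketch (downloads a checkpoint on first run; illustrative only):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test.")
#   # features is a nested list of shape [1, sequence_length, hidden_size]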
| 711 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase = update_area_of_max_square(lowercase_ , col + 1 )
lowercase = update_area_of_max_square(row + 1 , col + 1 )
lowercase = update_area_of_max_square(row + 1 , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
return sub_problem_sol
else:
return 0
lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase = [0]
lowercase = [[-1] * cols for _ in range(lowercase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = dp_array[row][col + 1]
lowercase = dp_array[row + 1][col + 1]
lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(dp_array[row][col] , lowercase_ )
else:
lowercase = 0
return largest_square_area
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [0] * (cols + 1)
lowercase = [0] * (cols + 1)
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = current_row[col + 1]
lowercase = next_row[col + 1]
lowercase = next_row[col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(current_row[col] , lowercase_ )
else:
lowercase = 0
lowercase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
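# Hedged standalone restatement of the two-row DP above, with a small check
# on a 3x3 matrix containing a single 2x2 block of ones.
def _largest_square_side(rows: int, cols: int, mat: list) -> int:
    next_row = [0] * (cols + 1)
    best = 0
    for row in range(rows - 1, -1, -1):
        current_row = [0] * (cols + 1)
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                current_row[col] = 1 + min(
                    current_row[col + 1], next_row[col + 1], next_row[col]
                )
                best = max(best, current_row[col])
        next_row = current_row
    return best

assert _largest_square_side(3, 3, [[0, 1, 1], [0, 1, 1], [1, 0, 0]]) == 2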
| 653 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowercase_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class __UpperCamelCase (__UpperCAmelCase ):
def __init__( self , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , """vision""" )
self.check_model_type(lowerCAmelCase_ )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
if "text_queries" in kwargs:
lowercase = kwargs.pop("""text_queries""" )
if isinstance(lowerCAmelCase_ , (str, Image.Image) ):
lowercase = {"""image""": image, """candidate_labels""": candidate_labels}
else:
lowercase = image
lowercase = super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ )
return results
def _a ( self , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = {}
if "threshold" in kwargs:
lowercase = kwargs["""threshold"""]
if "top_k" in kwargs:
lowercase = kwargs["""top_k"""]
return {}, {}, postprocess_params
def _a ( self , _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase = load_image(inputs["""image"""] )
lowercase = inputs["""candidate_labels"""]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
lowercase = candidate_labels.split(""",""" )
lowercase = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(lowerCAmelCase_ ):
lowercase = self.tokenizer(lowerCAmelCase_ , return_tensors=self.framework )
lowercase = self.image_processor(lowerCAmelCase_ , return_tensors=self.framework )
yield {
"is_last": i == len(lowerCAmelCase_ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _a ( self , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = model_inputs.pop("""target_size""" )
lowercase = model_inputs.pop("""candidate_label""" )
lowercase = model_inputs.pop("""is_last""" )
lowercase = self.model(**lowerCAmelCase_ )
lowercase = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0.1 , _lowerCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
lowercase = []
for model_output in model_outputs:
lowercase = model_output["""candidate_label"""]
lowercase = BaseModelOutput(lowerCAmelCase_ )
lowercase = self.image_processor.post_process_object_detection(
outputs=lowerCAmelCase_ , threshold=lowerCAmelCase_ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
lowercase = outputs["""scores"""][index].item()
lowercase = self._get_bounding_box(outputs["""boxes"""][index][0] )
lowercase = {"""score""": score, """label""": label, """box""": box}
results.append(lowerCAmelCase_ )
lowercase = sorted(lowerCAmelCase_ , key=lambda _lowerCAmelCase : x["score"] , reverse=lowerCAmelCase_ )
if top_k:
lowercase = results[:top_k]
return results
def _a ( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
lowercase , lowercase , lowercase , lowercase = box.int().tolist()
lowercase = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
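# Hedged usage sketch (downloads a checkpoint on first run; illustrative only):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection",
#                       model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]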
| 712 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
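# Hedged standalone illustration (not part of the original class): the
# `multi_query` flag above shrinks the per-token KV cache from
# 2 * n_head * head_dim values (multi-head) down to 2 * head_dim (multi-query).
if __name__ == "__main__":
    example_n_embd, example_n_head = 768, 12
    example_head_dim = example_n_embd // example_n_head
    print(2 * example_head_dim)                   # multi-query: 128 values per token per layer
    print(2 * example_n_head * example_head_dim)  # multi-head: 1536 values per token per layer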
| 653 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] ):
# Initialise PyTorch model
lowercase = RemBertConfig.from_json_file(a_ )
print("""Building PyTorch model from configuration: {}""".format(str(a_ ) ) )
lowercase = RemBertModel(a_ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(a_ , a_ , a_ )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(a_ ) )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
lowercase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
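# Example invocation (hedged: the script filename and paths are placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert.ckpt \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin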
| 713 |
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : int=True , lowercase_ : str="pt" ):
lowercase = {"""add_prefix_space""": True} if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not line.startswith(""" """ ) else {}
lowercase = padding_side
return tokenizer(
[line] , max_length=UpperCAmelCase__ , padding="""max_length""" if pad_to_max_length else None , truncation=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str , lowercase_ : int=None , ):
lowercase = input_ids.ne(UpperCAmelCase__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __UpperCamelCase (_a ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="train" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="" , ) -> str:
'''simple docstring'''
super().__init__()
lowercase = Path(_A ).joinpath(type_path + """.source""" )
lowercase = Path(_A ).joinpath(type_path + """.target""" )
lowercase = self.get_char_lens(self.src_file )
lowercase = max_source_length
lowercase = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
lowercase = tokenizer
lowercase = prefix
if n_obs is not None:
lowercase = self.src_lens[:n_obs]
lowercase = src_lang
lowercase = tgt_lang
def __len__( self ) -> List[Any]:
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = index + 1 # linecache starts at 1
lowercase = self.prefix + linecache.getline(str(self.src_file ) , _A ).rstrip("""\n""" )
lowercase = linecache.getline(str(self.tgt_file ) , _A ).rstrip("""\n""" )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _A ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
lowercase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _A ) else self.tokenizer
)
lowercase = self.tokenizer.generator if isinstance(self.tokenizer , _A ) else self.tokenizer
lowercase = encode_line(_A , _A , self.max_source_length , """right""" )
lowercase = encode_line(_A , _A , self.max_target_length , """right""" )
lowercase = source_inputs["""input_ids"""].squeeze()
lowercase = target_inputs["""input_ids"""].squeeze()
lowercase = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return [len(_A ) for x in Path(_A ).open().readlines()]
def _a ( self , _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase = torch.stack([x["""input_ids"""] for x in batch] )
lowercase = torch.stack([x["""attention_mask"""] for x in batch] )
lowercase = torch.stack([x["""decoder_input_ids"""] for x in batch] )
lowercase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _A )
else self.tokenizer.pad_token_id
)
lowercase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _A )
else self.tokenizer.pad_token_id
)
lowercase = trim_batch(_A , _A )
lowercase , lowercase = trim_batch(_A , _A , attention_mask=_A )
lowercase = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
lowercase_ : Dict = getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
return list(itertools.chain.from_iterable(UpperCAmelCase__ ) )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] ):
lowercase = get_git_info()
save_json(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , """git_log.json""" ) )
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[str]=4 , **lowercase_ : Optional[Any] ):
with open(UpperCAmelCase__ , """w""" ) as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ , indent=UpperCAmelCase__ , **UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
with open(UpperCAmelCase__ ) as f:
return json.load(UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = git.Repo(search_parent_directories=UpperCAmelCase__ )
lowercase = {
"""repo_id""": str(UpperCAmelCase__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : List[str] ):
return list(map(UpperCAmelCase__ , UpperCAmelCase__ ) )
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : Optional[Any] ):
with open(UpperCAmelCase__ , """wb""" ) as f:
return pickle.dump(UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
def remove_articles(lowercase_ : Optional[int] ):
return re.sub(R"""\b(a|an|the)\b""" , """ """ , UpperCAmelCase__ )
def white_space_fix(lowercase_ : int ):
return " ".join(text.split() )
def remove_punc(lowercase_ : List[Any] ):
lowercase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase_ : Tuple ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__ ) ) ) )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : Any ):
lowercase = normalize_answer(UpperCAmelCase__ ).split()
lowercase = normalize_answer(UpperCAmelCase__ ).split()
lowercase = Counter(UpperCAmelCase__ ) & Counter(UpperCAmelCase__ )
lowercase = sum(common.values() )
if num_same == 0:
return 0
lowercase = 1.0 * num_same / len(UpperCAmelCase__ )
lowercase = 1.0 * num_same / len(UpperCAmelCase__ )
lowercase = (2 * precision * recall) / (precision + recall)
return fa
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : List[Any] ):
return normalize_answer(UpperCAmelCase__ ) == normalize_answer(UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : List[Any] ):
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
lowercase = 0
for hypo, pred in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
em += exact_match_score(UpperCAmelCase__ , UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
em /= len(UpperCAmelCase__ )
return {"em": em}
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
return model_prefix.startswith("""rag""" )
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : str ):
lowercase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase = """dropout_rate"""
for p in extra_params:
if getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
if not hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) and not hasattr(UpperCAmelCase__ , equivalent_param[p] ):
logger.info("""config doesn\'t have a `{}` attribute""".format(UpperCAmelCase__ ) )
delattr(UpperCAmelCase__ , UpperCAmelCase__ )
continue
lowercase = p if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) else equivalent_param[p]
setattr(UpperCAmelCase__ , UpperCAmelCase__ , getattr(UpperCAmelCase__ , UpperCAmelCase__ ) )
delattr(UpperCAmelCase__ , UpperCAmelCase__ )
return hparams, config
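# Hedged standalone restatement of the token-level F1 above (illustrative;
# it skips the article/punctuation normalisation step), with a quick check.
def _token_f1(pred: str, gold: str) -> float:
    pred_toks, gold_toks = pred.lower().split(), gold.lower().split()
    num_same = sum((Counter(pred_toks) & Counter(gold_toks)).values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

assert abs(_token_f1("a cat sat", "the cat sat") - 2 / 3) < 1e-9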
| 714 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
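# Example invocation (hedged: the script filename and paths are placeholders):
#
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small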
| 653 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653 | 0 |
"""
Project Euler 107: https://projecteuler.net/problem=107

Given a weighted network, find the maximum saving achievable by removing
redundant edges while keeping the network connected: the total edge weight
minus the weight of a minimum spanning tree.
"""
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new (undirected) edge, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm and return a minimum spanning tree of this graph."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel: strictly heavier than any real edge.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # Only consider edges crossing the cut between the subgraph and the rest.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]

    # The matrix is symmetric, so only the lower triangle is read ("-" = no edge).
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
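

# Illustrative check (not part of the original solution): Prim's algorithm on
# the 7-node example network from the Project Euler 107 problem statement.
# Its total weight is 243 and its minimum spanning tree weighs 93, so the
# maximum saving is 150.
def _demo_prims_algorithm() -> None:
    example_edges: dict[EdgeT, int] = {
        (0, 1): 16, (0, 2): 12, (0, 3): 21,
        (1, 3): 17, (1, 4): 20, (2, 3): 28,
        (2, 5): 31, (3, 4): 18, (3, 5): 19,
        (3, 6): 23, (4, 6): 11, (5, 6): 27,
    }
    graph = Graph(set(range(7)), example_edges)
    mst = graph.prims_algorithm()
    assert sum(graph.edges.values()) - sum(mst.edges.values()) == 150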
| 716 |
"""Benchmarking the library on inference and training in TensorFlow."""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate old-style `--no_xxx` boolean flags into a helpful error message.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
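
# Usage sketch (illustrative; the exact flags depend on the installed
# `transformers` version):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128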
| 653 | 0 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
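
# Behavior note (added for clarity): with `_LazyModule`, importing this package
# is cheap. The configuration/tokenization/modeling submodules listed in
# `_import_structure` are only loaded the first time one of their attributes
# (e.g. `CpmAntTokenizer`) is actually accessed.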
| 717 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow C++ logging before any TF import
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 0 |
"""Scrape mobile-app-development job listings from indeed.co.in."""
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 718 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    """A character-level SpeechT5 tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; it is re-created in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
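
# Usage sketch (illustrative; downloads the checkpoint on first use):
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
#   ids = tokenizer("hello").input_ids  # character-level ids ending in tokenizer.eos_token_id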
| 653 | 0 |
"""Utilities to fetch artifacts from the last scheduled (daily) CI run on GitHub Actions."""
import os
import zipfile

import requests

from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` (sic) matches the keyword expected by `get_artifacts_links`.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the artifacts of the last completed daily CI run and return their contents."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
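

if __name__ == "__main__":
    # Illustrative only: the artifact name and the token source are assumptions,
    # not values defined by this module.
    reports = get_last_daily_ci_reports(
        artifact_names=["run_all_tests_gpu_test_reports"],
        output_dir="previous_ci_artifacts",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    print(sorted(reports))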
| 719 |
"""
Project Euler 40: Champernowne's constant 0.123456789101112... is built by
concatenating the positive integers. Find d1 * d10 * d100 * d1000 * d10000
* d100000 * d1000000, where dn is the nth digit of the fractional part.
"""


def solution() -> int:
    """Return the product of the required digits of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[9_9999])
        * int(constant[99_9999])
    )


if __name__ == "__main__":
    print(solution())
| 653 | 0 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a writer batch size (parquet row group size) based on the dataset features.

    Datasets with large binary cells (images, audio, raw bytes) get a smaller row
    group, since random access to one row requires decoding its whole row group.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
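

# Illustrative: for features containing an `Image` column, the helper above
# caps the row group size, e.g.
#   get_writer_batch_size(Features({"image": Image(), "label": Value("int64")}))
# returns `config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS`, while purely
# string/number features return None (keep the library default).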
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset as Parquet to a binary file handle.

        The caller is responsible for opening and closing the handle.
        """
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 720 |
"""Project Euler 13: work out the first ten digits of the sum of the
one-hundred 50-digit numbers stored one per line in num.txt."""
import os


def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 653 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __UpperCamelCase (_UpperCAmelCase ):
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(_lowerCAmelCase , """num_attention_heads""" ) )
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=64 , _lowerCAmelCase=3 , _lowerCAmelCase=3 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=16 , _lowerCAmelCase=[128, 256, 384] , _lowerCAmelCase=[4, 6, 8] , _lowerCAmelCase=[2, 3, 4] , _lowerCAmelCase=[16, 16, 16] , _lowerCAmelCase=0 , _lowerCAmelCase=[2, 2, 2] , _lowerCAmelCase=[2, 2, 2] , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=2 , ) -> int:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = kernel_size
lowercase = stride
lowercase = padding
lowercase = hidden_sizes
lowercase = num_attention_heads
lowercase = depths
lowercase = key_dim
lowercase = drop_path_rate
lowercase = patch_size
lowercase = attention_ratio
lowercase = mlp_ratio
lowercase = initializer_range
lowercase = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase = is_training
lowercase = use_labels
lowercase = num_labels
lowercase = initializer_range
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> List[str]:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase = LevitModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
lowercase = (self.image_size, self.image_size)
lowercase , lowercase = image_size[0], image_size[1]
for _ in range(4 ):
lowercase = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = LevitForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__A = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = LevitModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ) -> Dict:
'''simple docstring'''
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def _a ( self ) -> Dict:
'''simple docstring'''
pass
def _a ( self ) -> Any:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = len(self.model_tester.depths ) + 1
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
lowercase = (self.model_tester.image_size, self.model_tester.image_size)
lowercase , lowercase = image_size[0], image_size[1]
for _ in range(4 ):
lowercase = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Dict:
'''simple docstring'''
pass
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Dict:
'''simple docstring'''
lowercase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
def _a ( self ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_lowerCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase = model(**_lowerCAmelCase ).loss
loss.backward()
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase = False
lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase = model_class(_lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(_lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
lowercase = model(**_lowerCAmelCase ).loss
loss.backward()
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_lowerCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
lowercase = problem_type["""title"""]
lowercase = problem_type["""num_labels"""]
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if problem_type["num_labels"] > 1:
lowercase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_lowerCAmelCase ) as warning_list:
lowercase = model(**_lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = LevitModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> Any:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([1.0448, -0.3745, -1.8317] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 721 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
        lowercase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
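
# End-to-end sketch (illustrative; requires a GPU and a model download):
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-base", safety_checker=None
#   ).to("cuda")
#   image = pipe("a photo of the dolomites").images[0]  # wide panorama output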
| 653 | 0 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
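

# Shape/scale sketch (illustrative): for a (batch, channels, H, W) activation,
# `drop_path` zeroes whole samples with probability `drop_prob` during training
# and rescales the survivors by 1 / (1 - drop_prob):
#   x = torch.ones(4, 3, 2, 2)
#   y = drop_path(x, drop_prob=0.5, training=True)
#   # every y[i] is either all 0.0 or all 2.0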
class __UpperCamelCase (nn.Module ):
def __init__( self , _lowerCAmelCase = None ) -> None:
'''simple docstring'''
super().__init__()
lowercase = drop_prob
def _a ( self , _lowerCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return drop_path(_lowerCAmelCase , self.drop_prob , self.training )
def _a ( self ) -> str:
'''simple docstring'''
return "p={}".format(self.drop_prob )
class __UpperCamelCase (nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
super().__init__()
lowercase = patch_size if isinstance(_lowerCAmelCase , collections.abc.Iterable ) else (patch_size, patch_size)
lowercase = stride if isinstance(_lowerCAmelCase , collections.abc.Iterable ) else (stride, stride)
lowercase = padding if isinstance(_lowerCAmelCase , collections.abc.Iterable ) else (padding, padding)
        lowercase = nn.Conv2d(_lowerCAmelCase , _lowerCAmelCase , kernel_size=_lowerCAmelCase , stride=_lowerCAmelCase , padding=_lowerCAmelCase )
lowercase = norm_layer(_lowerCAmelCase ) if norm_layer else nn.Identity()
def _a ( self , _lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = self.projection(_lowerCAmelCase )
lowercase = self.norm(_lowerCAmelCase )
return embeddings
class __UpperCamelCase (nn.GroupNorm ):
def __init__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
super().__init__(1 , _lowerCAmelCase , **_lowerCAmelCase )
class __UpperCamelCase (nn.Module ):
def __init__( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
        lowercase = nn.AvgPool2d(_lowerCAmelCase , stride=1 , padding=pool_size // 2 , count_include_pad=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.pool(_lowerCAmelCase ) - hidden_states
class __UpperCamelCase (nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
'''simple docstring'''
super().__init__()
        lowercase = nn.Conv2d(_lowerCAmelCase , _lowerCAmelCase , 1 )
        lowercase = nn.Conv2d(_lowerCAmelCase , _lowerCAmelCase , 1 )
lowercase = PoolFormerDropPath(_lowerCAmelCase )
if isinstance(config.hidden_act , _lowerCAmelCase ):
lowercase = ACTaFN[config.hidden_act]
else:
lowercase = config.hidden_act
def _a ( self , _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.conva(_lowerCAmelCase )
lowercase = self.act_fn(_lowerCAmelCase )
lowercase = self.drop(_lowerCAmelCase )
lowercase = self.conva(_lowerCAmelCase )
lowercase = self.drop(_lowerCAmelCase )
return hidden_states
class __UpperCamelCase (nn.Module ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase = PoolFormerPooling(_lowerCAmelCase )
lowercase = PoolFormerOutput(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase = PoolFormerGroupNorm(_lowerCAmelCase )
lowercase = PoolFormerGroupNorm(_lowerCAmelCase )
# Useful for training neural nets
lowercase = PoolFormerDropPath(_lowerCAmelCase ) if drop_path > 0.0 else nn.Identity()
lowercase = config.use_layer_scale
if config.use_layer_scale:
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_lowerCAmelCase) ) , requires_grad=_lowerCAmelCase )
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((_lowerCAmelCase) ) , requires_grad=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if self.use_layer_scale:
lowercase = self.pooling(self.before_norm(_lowerCAmelCase ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowercase = hidden_states + self.drop_path(_lowerCAmelCase )
lowercase = ()
lowercase = self.output(self.after_norm(_lowerCAmelCase ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowercase = hidden_states + self.drop_path(_lowerCAmelCase )
lowercase = (output,) + outputs
return outputs
else:
lowercase = self.drop_path(self.pooling(self.before_norm(_lowerCAmelCase ) ) )
# First residual connection
lowercase = pooling_output + hidden_states
lowercase = ()
# Second residual connection inside the PoolFormerOutput block
lowercase = self.drop_path(self.output(self.after_norm(_lowerCAmelCase ) ) )
lowercase = hidden_states + layer_output
lowercase = (output,) + outputs
return outputs
class __UpperCamelCase (nn.Module ):
def __init__( self , _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowercase = config
# stochastic depth decay rule
lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowercase = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowercase = nn.ModuleList(_lowerCAmelCase )
# Transformer blocks
lowercase = []
lowercase = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowercase = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_lowerCAmelCase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_lowerCAmelCase ) )
lowercase = nn.ModuleList(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True ) -> Tuple:
'''simple docstring'''
lowercase = () if output_hidden_states else None
lowercase = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowercase , lowercase = layers
# Get patch embeddings from hidden_states
lowercase = embedding_layer(_lowerCAmelCase )
# Send the embeddings through the blocks
for _, blk in enumerate(_lowerCAmelCase ):
lowercase = blk(_lowerCAmelCase )
lowercase = layer_outputs[0]
if output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_lowerCAmelCase , hidden_states=_lowerCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
__A = PoolFormerConfig
__A = '''poolformer'''
__A = '''pixel_values'''
__A = True
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
        if isinstance(_lowerCAmelCase , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowerCAmelCase , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=False ) -> int:
'''simple docstring'''
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase = value
lowercase_ : Any = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowercase_ : List[str] = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , _UpperCAmelCase , )
class __UpperCamelCase (_UpperCAmelCase ):
    def __init__( self , config ) -> str:
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        self.encoder = PoolFormerEncoder(config )
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings(self ) -> Tuple:
        '''simple docstring'''
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(self , pixel_values = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )
        encoder_outputs = self.encoder(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output , hidden_states=encoder_outputs.hidden_states , )
class __UpperCamelCase (nn.Module ):
    def __init__( self , config ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__()
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )
    def forward(self , hidden_states ) -> int:
        '''simple docstring'''
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
    '''
    PoolFormer Model transformer with an image classification head on top
    ''' , POOLFORMER_START_DOCSTRING , )
class __UpperCamelCase (_UpperCAmelCase ):
    def __init__( self , config ) -> str:
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config )
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
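# Hedged usage sketch (an assumption: this module mirrors the public
# transformers PoolFormer classes; because of the module's internal imports,
# run the snippet from a separate script). Random pixel values stand in for a
# real image, so the logits are meaningless but shape-correct.
#   import torch
#   from transformers import PoolFormerConfig, PoolFormerForImageClassification
#   model = PoolFormerForImageClassification(PoolFormerConfig(num_labels=3)).eval()
#   with torch.no_grad():
#       logits = model(torch.randn(1, 3, 224, 224)).logits
#   logits.shape  # torch.Size([1, 3])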
| 700 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    freeze_encoder: bool = field(default=False , metadata={'''help''': '''Whether to freeze the encoder.'''} )
    freeze_embeds: bool = field(default=False , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    task: Optional[str] = field(
        default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total sequence length for target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            '''help''': (
                '''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded. '''
                '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
                '''during ``evaluate`` and ``predict``.'''
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            '''help''': (
                '''The maximum total sequence length for test target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
    n_val: Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
    n_test: Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
    src_lang: Optional[str] = field(default=None , metadata={'''help''': '''Source language id for translation.'''} )
    tgt_lang: Optional[str] = field(default=None , metadata={'''help''': '''Target language id for translation.'''} )
    eval_beams: Optional[int] = field(default=None , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics(split , metrics , output_dir ):
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["""train_n_objs"""] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("""train""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(metric_key_prefix="""val""" )
        metrics["""val_n_objs"""] = data_args.n_val
        metrics["""val_loss"""] = round(metrics["""val_loss"""] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("""val""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="""test""" )
        metrics = test_output.metrics
        metrics["""test_n_objs"""] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["""test_loss"""] = round(metrics["""test_loss"""] , 4 )
            handle_metrics("""test""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
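# Example invocation (hedged: the paths are hypothetical, and the data_dir must
# contain train/val/test .source and .target files as the legacy seq2seq
# examples expect):
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#       --data_dir ./xsum --output_dir ./xsum_finetuned \
#       --do_train --do_eval --predict_with_generate --n_val 500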
| 653 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class ImageClassification(TaskTemplate ):
    task: str = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''image''': Image()} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    image_column: str = '''image'''
    label_column: str = '''labels'''
    def align_with_features(self , features ) -> Optional[int]:
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template
    @property
    def column_mapping(self ) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
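# Hedged example (an assumption: the class above mirrors the public
# `datasets.tasks.ImageClassification` template). Because of the relative
# imports this module is not directly runnable, so the snippet below belongs in
# a separate script; aligning the template fills in the real label names.
#   from datasets import ClassLabel, Features, Image
#   from datasets.tasks import ImageClassification
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = ImageClassification(image_column="image", label_column="labels")
#   template.align_with_features(features).label_schema
#   # -> {'labels': ClassLabel(names=['cat', 'dog'], ...)}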
| 701 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline(Pipeline ):
    def __init__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters(self , max_new_tokens=None , generate_kwargs=None , prompt=None ) -> str:
        '''simple docstring'''
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["""prompt"""] = prompt
        if generate_kwargs is not None:
            forward_kwargs["""generate_kwargs"""] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["""generate_kwargs"""] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["""generate_kwargs"""]["""max_new_tokens"""] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ) -> Any:
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def preprocess(self , image , prompt=None ) -> List[str]:
        '''simple docstring'''
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F"""Received an invalid text input, got - {type(prompt )} - but expected a single string. """
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["""input_ids"""] = None
        return model_inputs
    def _forward(self , model_inputs , generate_kwargs=None ) -> Union[str, Any]:
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["""input_ids"""] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess(self , model_outputs ) -> List[str]:
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
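# Hedged usage sketch (the checkpoint name is an assumption; any image-to-text
# checkpoint works): the pipeline above is normally reached through the public
# `transformers.pipeline` factory rather than instantiated directly.
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#   # -> [{'generated_text': '...'}]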
| 653 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig ):
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ) -> Any:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
        super().__init__(**kwargs )
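# Hedged sketch using the public name this class mirrors (run from a separate
# script, since this module uses relative imports): LXMERT's
# `num_hidden_layers` is a per-modality dict rather than a single int.
#   from transformers import LxmertConfig
#   LxmertConfig().num_hidden_layers
#   # -> {'vision': 5, 'cross_encoder': 5, 'language': 9}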
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class NezhaConfig(PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__( self , vocab_size=2_1128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) -> int:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
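# Hedged sketch (the public name is assumed available in recent transformers;
# run from a separate script because of the relative import above):
#   from transformers import NezhaConfig
#   NezhaConfig(max_relative_position=128).max_relative_position  # -> 128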
| 653 | 0 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
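# Example invocation (hypothetical local paths, shown only as an assumption):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type pndm --extract_ema --half \
#       --dump_path ./stable-diffusion-v1-5-diffusers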
| 703 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , """git_log.json""" ) , """w""" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("""Initializing GPUs""" )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["""WORLD_SIZE"""] )
        params.n_gpu_per_node = int(os.environ["""N_GPU_NODE"""] )
        params.global_rank = int(os.environ["""RANK"""] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["""N_NODES"""] )
        assert params.node_id == int(os.environ["""NODE_RANK"""] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed(args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
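# Hedged demo (not in the original utilities): a SimpleNamespace stands in for
# the parsed argument namespace; set_seed only reads `seed` and `n_gpu`.
if __name__ == "__main__":
    from types import SimpleNamespace

    set_seed(SimpleNamespace(seed=42, n_gpu=0))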
| 653 | 0 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator , dataset: DatasetDict , train_idxs: List[int] , valid_idxs: List[int] , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs ),
            """validation""": dataset["""train"""].select(valid_idxs ),
            """test""": dataset["""validation"""],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config , args ):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("""glue""" , """mrpc""" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"""epoch {epoch}:""" , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print("""Average test metrics from all folds:""" , test_metric )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""" , type=int , default=3 , help="""The number of splits to perform across the dataset""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
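# Example launches (hedged; both assume this file is saved as cross_validation.py):
#   python cross_validation.py --num_folds 5 --cpu
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16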
| 704 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str ) -> dict[Any, Any]:
    headers = {
        """Authorization""": F"""token {auth_token}""",
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 0 |
import math
def is_prime(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime(value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
        if value == first_value_val:
            return next_prime(value + 1 , **kwargs )
    return value
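# Quick demonstration (not in the original module): 97 is prime, and the first
# prime reached from 100 counting upward is 101.
if __name__ == "__main__":
    print(is_prime(97))    # True
    print(next_prime(100)) # 101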
| 705 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray , max_length: float , sample_rate: int = 1_6000 ):
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    eval_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    train_split_name: str = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    eval_split_name: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the evaluation data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    audio_column_name: str = field(
        default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    label_column_name: str = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    attention_mask: bool = field(
        default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
    def __post_init__(self ) -> List[Any]:
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder`"""
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`."""
                """Only make use of `--freeze_feature_encoder`.""" )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["""eval"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
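# Example invocation (hedged; the keyword-spotting setup below is a common way
# to run this example script, but any audio-classification dataset works):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval --max_length_seconds 1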
| 653 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
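# --- Illustrative sketch (not part of the original module) ---
# _LazyModule defers the heavy framework imports above until an attribute is
# first accessed. A minimal PEP 562-style equivalent for a package __init__,
# with a trimmed-down import structure (the names below are illustrative):
import importlib

_LAZY_STRUCTURE = {"configuration_xlm_roberta": ["XLMRobertaConfig"]}

def __getattr__(name):
    for submodule, attributes in _LAZY_STRUCTURE.items():
        if name in attributes:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")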
| 706 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
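# --- Illustrative check (not part of the original class) ---
# The constructor above maps each legacy "no_*" kwarg onto its positive
# counterpart before delegating to BenchmarkArguments; the translation step
# in isolation:
def translate_deprecated(kwargs: dict, deprecated_args: list) -> dict:
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

assert translate_deprecated({"no_cuda": True}, ["no_cuda"]) == {"cuda": False}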
| 653 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''Number of processes to use for the preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
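# --- Illustrative sketch (not part of the original script) ---
# Stripped of tokenization, the doc-stride logic above is plain sliding
# windows: each feature overlaps the previous one by `stride` tokens.
def sliding_windows(tokens: list, max_length: int, stride: int) -> list:
    step = max_length - stride
    return [tokens[i : i + max_length] for i in range(0, max(1, len(tokens) - stride), step)]

# sliding_windows(list(range(10)), max_length=6, stride=2)
# -> [[0, 1, 2, 3, 4, 5], [4, 5, 6, 7, 8, 9]]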
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (start and end logits come back as float32 arrays)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
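# --- Illustrative sketch (not part of the original script) ---
# The latency bookkeeping in the loop above reduces to: accumulate per-batch
# wall time, count iterations, report the mean. A framework-free rendering
# with a stand-in workload:
import time

total_time, niter = 0.0, 0
for _batch in range(3):  # stand-in for the eval dataloader
    tic = time.perf_counter()
    sum(range(10_000))  # stand-in for model_infer
    total_time += time.perf_counter() - tic
    niter += 1
print("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))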
| 707 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 653 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 708 |
'''simple docstring'''
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
lowercase_ : Tuple = input('''Enter 10 bit key: ''')
lowercase_ : Any = input('''Enter 8 bit message: ''')
lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowercase_ : List[Any] = [2, 4, 3, 1]
lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowercase_ : Union[str, Any] = apply_table(key, paa_table)
lowercase_ : Optional[Any] = temp[:5]
lowercase_ : int = temp[5:]
lowercase_ : List[str] = left_shift(left)
lowercase_ : int = left_shift(right)
lowercase_ : Tuple = apply_table(left + right, pa_table)
lowercase_ : List[str] = left_shift(left)
lowercase_ : Optional[Any] = left_shift(right)
lowercase_ : Union[str, Any] = left_shift(left)
lowercase_ : Union[str, Any] = left_shift(right)
lowercase_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
lowercase_ : int = apply_table(message, IP)
lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
lowercase_ : Any = temp[4:] + temp[:4]
lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
lowercase_ : Tuple = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowercase_ : List[str] = apply_table(CT, IP)
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = temp[4:] + temp[:4]
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
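# --- Illustrative check (not part of the original script) ---
# The two primitives the cipher is built from: apply_table is a 1-indexed
# permutation, and xor compares bitstrings position by position.
assert apply_table("abcd", [2, 4, 3, 1]) == "bdca"
assert xor("1010", "0110") == "1100"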
| 653 | 0 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
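# --- Illustrative check (not part of the original script) ---
# read_file_binary renders every byte as a zero-padded 8-bit string, which is
# exactly what the bit-level decoder above consumes:
assert f"{65:08b}" == "01000001"  # the byte for ASCII 'A'
assert decompress_data("1") == "1"  # trivial one-bit round trip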
| 709 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer supports batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
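# --- Illustrative sketch (not part of the original script) ---
# get_duration is imported from a local utils module whose source is not shown
# here. A plausible minimal version, assuming it replaces the wrapped
# function's return value with the elapsed wall time, would be:
import functools
import time

def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper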
| 653 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 3_0000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
lowercase = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 710 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
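# --- Illustrative sketch (not part of the original script) ---
# The TensorFlow graph above implements plain Lloyd iterations. The same
# expectation/maximization loop in NumPy, for comparison:
import numpy as np

def kmeans_numpy(vectors, k, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)]
    for _ in range(iterations):
        # Expectation: assign every vector to its nearest centroid
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its members
        for cluster in range(k):
            members = vectors[assignments == cluster]
            if len(members):
                centroids[cluster] = members.mean(axis=0)
    return centroids, assignments

cents, assigns = kmeans_numpy(np.random.default_rng(1).random((20, 2)), k=3)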
| 653 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 711 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]
    ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
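# --- Illustrative check (not part of the original script) ---
# All four variants implement the recurrence
#     side(r, c) = 1 + min(right, diagonal, down)   when mat[r][c] == 1
# so they should agree on the same input:
_mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
assert (
    largest_square_area_in_matrix_top_down_approach(3, 3, _mat)
    == largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, _mat)
    == largest_square_area_in_matrix_bottom_up(3, 3, _mat)
    == largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, _mat)
    == 2
)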
| 653 | 0 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 712 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
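    # attribute_map above translates the library-wide config attribute names to
    # this model's GPT-2-style field names (n_embd, n_positions, ...).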
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
| 653 | 0 |
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] ):
lowercase = []
for line in lines:
lowercase = re.sub(R"""#.*""" , """""" , lowercase_ ) # remove comments
if line:
filtered_lines.append(lowercase_ )
lowercase = """\n""".join(lowercase_ )
# Make a hash from all this code
lowercase = full_str.encode("""utf-8""" )
return shaaaa(lowercase_ ).hexdigest()
# get importable module names and hash for caching
lowercase_ : str = {
'''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowercase_ : List[Any] = {
'''.csv''': ('''csv''', {}),
'''.tsv''': ('''csv''', {'''sep''': '''\t'''}),
'''.json''': ('''json''', {}),
'''.jsonl''': ('''json''', {}),
'''.parquet''': ('''parquet''', {}),
'''.arrow''': ('''arrow''', {}),
'''.txt''': ('''text''', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowercase_ : List[str] = {'''imagefolder''', '''audiofolder'''}
# Used to filter data files based on extensions given a module name
lowercase_ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
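# The folder-based builders additionally accept zip archives of the media files: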
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
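# Note that the hash above is taken after stripping comments and blank lines,
# so purely cosmetic edits to a builder script do not invalidate the cache.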
| 713 |
'''simple docstring'''
import requests
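# Posts a plain-text message to a Slack incoming webhook; any non-200 reply is
# surfaced as a ValueError carrying the response body for debugging.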
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __UpperCamelCase :
__A = 42
__A = None
__A = None
lowercase_ : str = namedtuple('''CoinsDistribResult''', '''moves excess''')
def SCREAMING_SNAKE_CASE ( lowercase_ : TreeNode | None ):
if root is None:
return 0
# Validation
def count_nodes(lowercase_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(lowercase_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(lowercase_ ) != count_coins(lowercase_ ):
        raise ValueError("""The number of nodes should be the same as the number of coins""" )
# Main calculation
def get_distrib(lowercase_ : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
lowercase , lowercase = get_distrib(node.left )
lowercase , lowercase = get_distrib(node.right )
lowercase = 1 - left_distrib_excess
lowercase = 1 - right_distrib_excess
lowercase = (
left_distrib_moves
+ right_distrib_moves
+ abs(lowercase_ )
+ abs(lowercase_ )
)
lowercase = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(lowercase_ , lowercase_ )
return get_distrib(lowercase_ )[0]
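# Worked example (a sketch): a root holding 3 coins with two empty leaf
# children needs 2 moves — one coin passed down each edge.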
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
    lowercase = {int(k): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
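# Sanity check (a sketch): names starting with "deeplabv3_" yield a 21-class
# PASCAL VOC segmentation config; all others get the 1000-class ImageNet head.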
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
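# The original checkpoints store the attention projections as one fused "qkv"
# matrix; the helper below slices it into separate query/key/value weights
# along the first dimension, as the Hugging Face layout expects.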
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 653 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE ( ):
lowercase = HfArgumentParser(lowercase_ )
lowercase = parser.parse_args_into_dataclasses()[0]
lowercase = TensorFlowBenchmark(args=lowercase_ )
try:
lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] )
lowercase = """"""
lowercase = eval(str(lowercase_ ).split(""" """ )[-1] )
lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase_ )
if len(lowercase_ ) > 0:
lowercase = full_error_msg + begin_error_msg + str(lowercase_ )
raise ValueError(lowercase_ )
benchmark.run()
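# Deprecated "--no_*" flags are collected into a single aggregated error so a
# user migrating an old command line sees every outdated flag at once.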
if __name__ == "__main__":
main()
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
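# The tester above instantiates a deliberately tiny SwiftFormer (four stages
# with small embedding dims) so the shared model tests below stay fast on CPU.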
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
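# Fixed COCO test image used as the input for the slow integration test below.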
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : list[int] , lowercase_ : list[int] ):
# Check if the input is valid
if not len(lowercase_ ) == len(lowercase_ ) == 3:
raise ValueError("""Please enter a valid equation.""" )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("""Both a & b of two equations can't be zero.""" )
# Extract the coefficients
lowercase , lowercase , lowercase = equationa
lowercase , lowercase , lowercase = equationa
# Calculate the determinants of the matrices
lowercase = aa * ba - aa * ba
lowercase = ca * ba - ca * ba
lowercase = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("""Infinite solutions. (Consistent system)""" )
else:
raise ValueError("""No solution. (Inconsistent system)""" )
else:
if determinant_x == determinant_y == 0:
            # Trivial solution: the system is consistent with unique solution x = y = 0
return (0.0, 0.0)
else:
lowercase = determinant_x / determinant
lowercase = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
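# Worked example (a sketch): solving x + 2y = 3 and 2x + y = 3 gives
# determinant = 1*1 - 2*2 = -3, determinant_x = 3*1 - 3*2 = -3 and
# determinant_y = 1*3 - 2*3 = -3, hence (x, y) = (1.0, 1.0).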
| 716 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE ( ):
lowercase = HfArgumentParser(lowercase_ )
lowercase = parser.parse_args_into_dataclasses()[0]
lowercase = TensorFlowBenchmark(args=lowercase_ )
try:
lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] )
lowercase = """"""
lowercase = eval(str(lowercase_ ).split(""" """ )[-1] )
lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase_ )
if len(lowercase_ ) > 0:
lowercase = full_error_msg + begin_error_msg + str(lowercase_ )
raise ValueError(lowercase_ )
benchmark.run()
if __name__ == "__main__":
main()
| 653 | 0 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = model.config
lowercase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
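# The Swin-style encoder config reuses the original image size, depths and
# window size, while the decoder becomes an MBart causal LM sized from the
# original decoder layer count and tokenizer vocabulary.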
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
if "encoder.model" in name:
lowercase = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase = """encoder.""" + name
if "attn.proj" in name:
lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase = """encoder.layernorm.bias"""
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] ):
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = int(key_split[5] )
lowercase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : Optional[int]=None , lowercase_ : Dict=False ):
# load original model
lowercase = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
lowercase , lowercase = get_configs(lowercase_ )
lowercase = DonutSwinModel(lowercase_ )
lowercase = MBartForCausalLM(lowercase_ )
lowercase = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
lowercase = original_model.state_dict()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
lowercase = load_dataset("""hf-internal-testing/example-documents""" )
lowercase = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
lowercase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase = DonutProcessor(lowercase_ , lowercase_ )
lowercase = processor(lowercase_ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase = """When is the coffee break?"""
lowercase = task_prompt.replace("""{user_input}""" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase = original_model.encoder.model.patch_embed(lowercase_ )
lowercase , lowercase = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
lowercase = original_model.encoder(lowercase_ )
lowercase = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
lowercase = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
lowercase = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
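    # At this point the patch embeddings, encoder hidden states and decoder
    # logits have all matched the original model within the given tolerances.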
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowercase_ : Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 717 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase_ : List[str] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : List[Any] = logging.get_logger(__name__)
# TODO Update this
lowercase_ : str = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''esm'''
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1026 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase="absolute" , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , mask_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
lowercase = emb_layer_norm_before
lowercase = token_dropout
lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
lowercase = EsmFoldConfig()
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase = EsmFoldConfig(**_lowerCAmelCase )
lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
lowercase = get_default_vocab_list()
else:
lowercase = vocab_list
else:
lowercase = None
lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , _lowerCAmelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = super().to_dict()
if isinstance(self.esmfold_config , _lowerCAmelCase ):
lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class __UpperCamelCase :
__A = None
__A = True
__A = False
__A = False
__A = False
__A = 0
__A = True
__A = False
__A = 128
__A = None
def _a ( self ) -> int:
'''simple docstring'''
if self.trunk is None:
lowercase = TrunkConfig()
elif isinstance(self.trunk , _lowerCAmelCase ):
lowercase = TrunkConfig(**self.trunk )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = asdict(self )
lowercase = self.trunk.to_dict()
return output
@dataclass
class __UpperCamelCase :
__A = 48
__A = 1024
__A = 128
__A = 32
__A = 32
__A = 32
__A = 0
__A = 0
__A = False
__A = 4
__A = 128
__A = None
def _a ( self ) -> int:
'''simple docstring'''
if self.structure_module is None:
lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , _lowerCAmelCase ):
lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
        lowercase = self.sequence_state_dim // self.sequence_head_width
        lowercase = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"""
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"""
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = asdict(self )
lowercase = self.structure_module.to_dict()
return output
@dataclass
class __UpperCamelCase :
__A = 384
__A = 128
__A = 16
__A = 128
__A = 12
__A = 4
__A = 8
__A = 0.1
__A = 8
__A = 1
__A = 2
__A = 7
__A = 10
__A = 1e-8
__A = 1e5
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return asdict(self )
def SCREAMING_SNAKE_CASE ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
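# The default ESM-2 alphabet above has 33 tokens: 4 special tokens, the 20
# standard amino acids, 5 ambiguous/rare residues (X, B, U, Z, O), 2 gap
# symbols, <null_1> and <mask>.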
| 718 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
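# A character-level SentencePiece tokenizer: every sequence is terminated with
# the </s> token, and sequence pairs are simply concatenated before it.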
| 653 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
for param in module.parameters():
lowercase = False
def SCREAMING_SNAKE_CASE ( ):
lowercase = """cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase = """mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = plt.imshow(lowercase_ )
fig.axes.get_xaxis().set_visible(lowercase_ )
fig.axes.get_yaxis().set_visible(lowercase_ )
plt.show()
def SCREAMING_SNAKE_CASE ( ):
lowercase = datetime.now()
lowercase = current_time.strftime("""%H:%M:%S""" )
return timestamp
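# Small notebook utilities: freeze a module's parameters, pick the best
# available device (with an MPS caveat), display a tensor image and timestamp runs.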
| 719 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( ):
lowercase = []
lowercase = 1
while len(lowercase_ ) < 1E6:
constant.append(str(lowercase_ ) )
i += 1
lowercase = """""".join(lowercase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
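# Project Euler 40: multiplies the digits d(1)·d(10)·...·d(1000000) of the
# Champernowne constant 0.123456789101112...; the expected result is 210.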
if __name__ == "__main__":
print(solution())
| 653 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int = 1000 ):
lowercase , lowercase = 1, 1
lowercase = []
for i in range(1 , n + 1 ):
lowercase = prev_numerator + 2 * prev_denominator
lowercase = prev_numerator + prev_denominator
if len(str(lowercase_ ) ) > len(str(lowercase_ ) ):
result.append(lowercase_ )
lowercase = numerator
lowercase = denominator
return len(lowercase_ )
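# Project Euler 57: counts, among the first 1000 continued-fraction expansions
# of sqrt(2), those whose numerator has more digits than its denominator
# (the expected answer is 153).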
if __name__ == "__main__":
print(f'''{solution() = }''')
| 720 |
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE ( ):
lowercase = os.path.join(os.path.dirname(lowercase_ ) , """num.txt""" )
with open(lowercase_ ) as file_hand:
return str(sum(int(lowercase_ ) for line in file_hand ) )[:10]
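# Project Euler 13: the first ten digits of the sum of the one hundred
# 50-digit numbers stored in num.txt.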
if __name__ == "__main__":
print(solution())
| 653 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowercase_ ) )
if txt[a].isalpha()
]
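# Example (a sketch): "ab" -> ["Ab", "aB"]; non-alphabetic positions are skipped.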
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 721 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
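    # The components above assemble a miniature Stable Diffusion stack (tiny
    # UNet, VAE and CLIP text model) so the panorama tests can run on CPU.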
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
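# The scheduler variants above (DDIM, Euler-ancestral, PNDM) each reproduce a
# fixed reference slice, exercising the panorama sliding-window denoising loop.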
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
        lowercase = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
        number_of_steps = 0
        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
        pipe(**_lowerCAmelCase , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
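# Illustrative usage sketch for the pipeline exercised by the tests above. This
# block is an assumption for documentation purposes, not part of the test suite;
# it reuses only APIs already exercised above (DDIMScheduler,
# StableDiffusionPanoramaPipeline) and assumes a CUDA device is available.
#
# import torch
# from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
#
# model_ckpt = "stabilityai/stable-diffusion-2-base"
# scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
# pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler).to("cuda")
# image = pipe("a photo of the dolomites", num_inference_steps=50).images[0]
# image.save("panorama.png")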
| 653 | 0 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
lowercase_ : List[str] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source : float , target : float ):
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
    args = _TestCommandArgs(dataset=lowercase_ , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(lowercase_ , """README.md""" )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(lowercase_ )
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 235_1563,
"""num_examples""": 1_0000,
},
{
"""name""": """validation""",
"""num_bytes""": 23_8418,
"""num_examples""": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
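# Quick sanity check for the 1%-closeness helper above (illustrative values):
#   is_apercent_close(100, 100.5)  -> True  (~0.5% relative difference)
#   is_apercent_close(100, 103.0)  -> False (~2.9% relative difference)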
| 700 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics( split , metrics , output_dir ):
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F""" {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
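# Minimal sketch of how HfArgumentParser maps CLI flags onto the dataclasses
# defined above; flag values are assumptions for illustration only.
#
# from transformers import HfArgumentParser
#
# parser = HfArgumentParser((ModelArguments, DataTrainingArguments))
# model_args, data_args = parser.parse_args_into_dataclasses(
#     ["--model_name_or_path", "t5-small", "--data_dir", "./cnn_dm"]
# )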
| 653 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ : Optional[int] = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf( model_type , pytorch_checkpoint_path , config_file , tf_dump_path , compare_with_pt_model=False , use_cached_models=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" )
    config_class , model_class , pt_model_class , aws_config_map = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models )
lowercase = config_class.from_json_file(lowercase_ )
lowercase = True
lowercase = True
print(F"""Building TensorFlow model from configuration: {config}""" )
lowercase = model_class(lowercase_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase = cached_file(
lowercase_ , lowercase_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase = load_pytorch_checkpoint_in_tfa_model(lowercase_ , lowercase_ )
if compare_with_pt_model:
lowercase = tf_model(tf_model.dummy_inputs , training=lowercase_ ) # build the network
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
lowercase = pt_model_class.from_pretrained(
pretrained_model_name_or_path=lowercase_ , config=lowercase_ , state_dict=lowercase_ )
with torch.no_grad():
lowercase = pt_model(**pt_model.dummy_inputs )
lowercase = pto[0].numpy()
lowercase = tfo[0].numpy()
lowercase = np.amax(np.abs(np_pt - np_tf ) )
print(F"""Max absolute difference between models outputs {diff}""" )
assert diff <= 2E-2, F"""Error, model absolute difference is >2e-2: {diff}"""
# Save pytorch-model
print(F"""Save TensorFlow model to {tf_dump_path}""" )
tf_model.save_weights(lowercase_ , save_format="""h5""" )
def convert_all_pt_checkpoints_to_tf( args_model_type , tf_dump_path , model_shortcut_names_or_path=None , config_shortcut_names_or_path=None , compare_with_pt_model=False , use_cached_models=False , remove_cached_files=False , only_convert_finetuned_models=False , ):
if args_model_type is None:
lowercase = list(MODEL_CLASSES.keys() )
else:
lowercase = [args_model_type]
for j, model_type in enumerate(lowercase_ , start=1 ):
print("""=""" * 100 )
print(F""" Converting model type {j}/{len(lowercase_ )}: {model_type}""" )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" )
        config_class , model_class , pt_model_class , aws_model_maps , aws_config_map = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(lowercase_ , lowercase_ ) , start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" )
continue
lowercase = model_shortcut_name
elif only_convert_finetuned_models:
print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" )
continue
print(
F""" Converting checkpoint {i}/{len(lowercase_ )}: {model_shortcut_name} - model_type {model_type}""" )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
lowercase = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models )
else:
lowercase = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase = cached_file(lowercase_ , lowercase_ , force_download=not use_cached_models )
else:
lowercase = model_shortcut_name
if os.path.isfile(lowercase_ ):
lowercase = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=lowercase_ , pytorch_checkpoint_path=lowercase_ , config_file=lowercase_ , tf_dump_path=os.path.join(lowercase_ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=lowercase_ , )
if remove_cached_files:
os.remove(lowercase_ )
os.remove(lowercase_ )
if __name__ == "__main__":
lowercase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
        '''Path to the PyTorch checkpoint or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
lowercase_ : Union[str, Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
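# Example invocation (illustrative; the script file name and paths are
# assumptions, not taken from this file):
#
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --tf_dump_path ./tf_dump \
#       --model_type bert \
#       --pytorch_checkpoint_path bert-base-uncased \
#       --config_file bert-base-uncased \
#       --compare_with_pt_model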
| 701 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
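# Illustrative use of the pipeline class above via the high-level factory; the
# checkpoint name is an assumption (any image-to-text model on the Hub works):
#
# from transformers import pipeline
#
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))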
| 653 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ : List[Any] = logging.get_logger(__name__)
lowercase_ : Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''beit'''
def __init__( self , _lowerCAmelCase=8192 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=True , _lowerCAmelCase=[3, 5, 7, 11] , _lowerCAmelCase=[1, 2, 3, 6] , _lowerCAmelCase=True , _lowerCAmelCase=0.4 , _lowerCAmelCase=256 , _lowerCAmelCase=1 , _lowerCAmelCase=False , _lowerCAmelCase=255 , **_lowerCAmelCase , ) -> Any:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = use_mask_token
lowercase = use_absolute_position_embeddings
lowercase = use_relative_position_bias
lowercase = use_shared_relative_position_bias
lowercase = layer_scale_init_value
lowercase = drop_path_rate
lowercase = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase = out_indices
lowercase = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase = use_auxiliary_head
lowercase = auxiliary_loss_weight
lowercase = auxiliary_channels
lowercase = auxiliary_num_convs
lowercase = auxiliary_concat_input
lowercase = semantic_loss_ignore_index
class __UpperCamelCase (_UpperCAmelCase ):
__A = version.parse('''1.11''' )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a ( self ) -> float:
'''simple docstring'''
return 1E-4
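# Illustrative: the ONNX config above declares dynamic batch/channel/height/width
# axes for `pixel_values`, so a dummy input matching the configuration defaults
# (image_size=224, num_channels=3) would be:
#
# import torch
# dummy_pixel_values = torch.zeros(1, 3, 224, 224)  # (batch, num_channels, height, width)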
| 702 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
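# Sketch: instantiating the configuration above under its published transformers
# name (an assumption here) and overriding a single field:
#
# from transformers import NezhaConfig
# config = NezhaConfig(max_relative_position=128)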
| 653 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
def make_batched( videos ):
if isinstance(lowercase_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase_ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class __UpperCamelCase (_UpperCAmelCase ):
__A = ['''pixel_values''']
def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = size if size is not None else {"""shortest_edge""": 224}
lowercase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
lowercase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )
lowercase = do_resize
lowercase = size
lowercase = do_center_crop
lowercase = crop_size
lowercase = resample
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BILINEAR , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
'''simple docstring'''
lowercase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
if "shortest_edge" in size:
lowercase = get_resize_output_image_size(_lowerCAmelCase , size["""shortest_edge"""] , default_to_square=_lowerCAmelCase )
elif "height" in size and "width" in size:
lowercase = (size["""height"""], size["""width"""])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
'''simple docstring'''
lowercase = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> List[str]:
'''simple docstring'''
return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray:
'''simple docstring'''
return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase = to_numpy_array(_lowerCAmelCase )
if do_resize:
lowercase = self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase )
if do_center_crop:
lowercase = self.center_crop(_lowerCAmelCase , size=_lowerCAmelCase )
if do_rescale:
lowercase = self.rescale(image=_lowerCAmelCase , scale=_lowerCAmelCase )
if do_normalize:
lowercase = self.normalize(image=_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase )
lowercase = to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase )
return image
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = size if size is not None else self.size
lowercase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase )
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(_lowerCAmelCase , param_name="""crop_size""" )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowercase = make_batched(_lowerCAmelCase )
lowercase = [
[
self._preprocess_image(
image=_lowerCAmelCase , do_resize=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase , do_center_crop=_lowerCAmelCase , crop_size=_lowerCAmelCase , do_rescale=_lowerCAmelCase , rescale_factor=_lowerCAmelCase , do_normalize=_lowerCAmelCase , image_mean=_lowerCAmelCase , image_std=_lowerCAmelCase , data_format=_lowerCAmelCase , )
for img in video
]
for video in videos
]
lowercase = {"""pixel_values""": videos}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
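# Quick check of the `make_batched` helper above (illustrative):
#
# import numpy as np
# clip = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]  # one 8-frame clip
# batched = make_batched(clip)  # a single clip is wrapped into a batch: [clip]
# assert len(batched) == 1 and len(batched[0]) == 8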
| 703 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def git_log( folder_path : str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , """git_log.json""" ) , """w""" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params( params ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed( args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
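# Illustrative: the seeding helper above only reads `.seed` and `.n_gpu`, so a
# plain namespace is enough to drive it (assumed usage, not from this file):
#
# from types import SimpleNamespace
# set_seed(SimpleNamespace(seed=42, n_gpu=0))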
| 653 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
        token = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
    def _a ( self , tokens ) -> Optional[Any]:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
        suffix_ones = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
        out_vocab_file = os.path.join(
            _lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
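# Illustrative usage with one of the checkpoints from the vocab map above:
#
# from transformers import SpeechT5Tokenizer
#
# tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
# print(tokenizer("Hello world").input_ids)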
| 704 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info( auth_token : str ):
    headers = {
        """Authorization""": F"""token {auth_token}""",
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
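# Example run (requires a valid GitHub personal access token; the printed keys
# come from the GitHub API response, e.g. "login", "id"):
#
#   USER_TOKEN=<personal-access-token> python fetch_github_info.py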
| 653 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowercase_ : Union[str, Any] = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowercase_ : Any = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowercase_ : List[Any] = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase (datasets.Metric ):
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
        n_correct = 0.0
        for i, j in zip(_lowerCAmelCase , _lowerCAmelCase ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(_lowerCAmelCase )
return {
"accuracy": accuracy,
}
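# Illustrative: the equivalence check that the metric above delegates to,
# mirroring the example in the metric docstring:
#
# import math_equivalence
# assert math_equivalence.is_equiv("\\frac{1}{2}", "1/2")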
| 705 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample a chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
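# Quick check of random_subsample above (left commented; illustrative values
# only): a 2 s clip at 16 kHz is cut down to a random 1 s window.
#
#   wav = np.zeros(32_000, dtype=np.float32)
#   random_subsample(wav, max_length=1.0).shape  # -> (16000,)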
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder` """
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`. """
                """Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowercase = DatasetDict()
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
            """Make sure to set `--label_column_name` to the correct label column - one of """
            F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ : int ):
lowercase = []
for audio in batch[data_args.audio_column_name]:
lowercase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ : Dict ):
lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
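# Hedged CLI sketch built from the dataclass fields above; the dataset choice
# ("superb" with config "ks", i.e. keyword spotting) is illustrative only:
#
#   python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir ./wav2vec2-ks --do_train --do_eval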
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_conditional_detr'''] = ['''ConditionalDetrFeatureExtractor''']
    _import_structure['''image_processing_conditional_detr'''] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_conditional_detr'''] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self , **kwargs ) -> Optional[int]:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    F"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={'''help''': '''Name of TPU'''} , )
    device_idx: int = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    eager_mode: bool = field(default=False , metadata={'''help''': '''Benchmark models in eager mode.'''} )
    use_xla: bool = field(
        default=False , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`.'''
        } , )
@cached_property
def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , """GPU""" )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
        return strategy
@property
def _a ( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
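# Minimal standalone illustration of the CPU fallback branch above: pin all ops
# to one device with tf.distribute.OneDeviceStrategy (left commented so this
# module does no TF work at import time):
#
#   import tensorflow as tf
#   strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
#   with strategy.scope():
#       print(tf.reduce_sum(tf.constant([1.0, 2.0])).numpy())  # 3.0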
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCamelCase (unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=4 , ) -> List[Any]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_attention_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_choices
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_attention_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = True
lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = True
__A = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = FlaxRobertaModelTester(self )
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase = model_class_name.from_pretrained("""roberta-base""" , from_pt=_lowerCAmelCase )
lowercase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
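# Hedged standalone sketch of the cross-framework load exercised in the slow
# test above (downloads weights, hence left commented):
#
#   from transformers import FlaxRobertaModel
#   model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
#   out = model(np.ones((1, 1), dtype="i4"))
#   print(out.last_hidden_state.shape)  # (1, 1, 768)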
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase (_UpperCAmelCase ):
    model_type = '''vit_msn'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
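# Hedged usage sketch: the config above mirrors transformers' ViTMSNConfig, so
# the upstream equivalent of instantiating it with these defaults is:
#
#   from transformers import ViTMSNConfig
#   config = ViTMSNConfig()                       # hidden_size=768, 12 layers/heads
#   print(config.image_size, config.patch_size)   # 224 16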
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowercase_ : Optional[Any] = False
class __UpperCamelCase (unittest.TestCase ):
    def get_model_optimizer( self , resolution=32 ) -> Union[str, Any]:
        set_seed(0 )
        model = UNetaDModel(sample_size=resolution , in_channels=3 , out_channels=3 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
        return model, optimizer
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=_lowerCAmelCase , )
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="""linear""" , clip_sample=_lowerCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowercase = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(_lowerCAmelCase ) for _ in range(4 )]
lowercase = [torch.randn((4, 3, 32, 32) ).to(_lowerCAmelCase ) for _ in range(4 )]
lowercase = [torch.randint(0 , 1000 , (4,) ).long().to(_lowerCAmelCase ) for _ in range(4 )]
# train with a DDPM scheduler
lowercase , lowercase = self.get_model_optimizer(resolution=32 )
model.train().to(_lowerCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase = model(_lowerCAmelCase , timesteps[i] ).sample
lowercase = torch.nn.functional.mse_loss(_lowerCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase , lowercase = self.get_model_optimizer(resolution=32 )
model.train().to(_lowerCAmelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase = model(_lowerCAmelCase , timesteps[i] ).sample
lowercase = torch.nn.functional.mse_loss(_lowerCAmelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-5 ) )
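# Minimal standalone sketch of the forward-noising step both training loops
# above share; scheduler.add_noise(clean, noise, timesteps) is the diffusers
# API used here (left commented to avoid torch work at import time):
#
#   import torch
#   from diffusers import DDPMScheduler
#   sched = DDPMScheduler(num_train_timesteps=1000)
#   clean = torch.randn(1, 3, 8, 8)
#   noisy = sched.add_noise(clean, torch.randn_like(clean), torch.tensor([10]))
#   print(noisy.shape)  # torch.Size([1, 3, 8, 8])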
'''simple docstring'''
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of S-DES: expand, mix with the round key, substitute, permute."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)  # p4_table is defined in the __main__ block below
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
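# Worked micro-examples of the primitives above (values checked by hand):
#   apply_table("abcd", [2, 4, 3, 1]) -> "bdca"
#   left_shift("10100")               -> "01001"
#   xor("1010", "0110")               -> "1100"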
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) pair of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed (row, column) position."""
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # rows in the first row, columns in the second
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # lay the (row, column) pairs back out sequentially, then split in half
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
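# Hedged round-trip check of the cipher above: encode() lowercases, drops
# spaces, and maps "j" to "i"; decode() inverts it for any such message.
_cipher = BifidCipher()
print(_cipher.decode(_cipher.encode("testmessage")))  # testmessage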
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
lowercase = dataset.map(**lowercase_ )
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
lowercase = dataset.filter(**lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowercase = generate_example_dataset(
os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )
lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples["""text"""] )
lowercase = map(lowercase_ )
lowercase = map(lowercase_ , batched=lowercase_ )
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""numpy""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""pandas""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
lowercase = filter(lowercase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , """wb""" ) as f:
f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
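# Hedged mini-example of the `map` call being timed above, on a toy in-memory
# dataset (Dataset.from_dict sidesteps the Arrow file the benchmark generates):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"numbers": [1.0, 2.0, 3.0]})
#   ds2 = ds.map(lambda ex: {"numbers": ex["numbers"] + 1}, batched=False)
#   print(ds2["numbers"])  # [2.0, 3.0, 4.0]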
'''simple docstring'''
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
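# Offline check of the URL helpers above (no network):
#   get_sub_domain_name("https://docs.github.com/en")  # -> 'docs.github.com'
#   get_domain_name("https://docs.github.com/en")      # -> 'github.com'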
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
        lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )  # tf.sub was renamed tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
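# Hedged modern rewrite of the expectation (cluster-assignment) step above as a
# vectorized NumPy sketch, avoiding the per-vector TF session calls:
def assign_clusters(vectors, centroids):
    import numpy as np

    v = np.asarray(vectors)[:, None, :]    # shape (n, 1, d)
    c = np.asarray(centroids)[None, :, :]  # shape (1, k, d)
    # squared Euclidean distance of every vector to every centroid, then argmin
    return ((v - c) ** 2).sum(axis=-1).argmin(axis=1)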
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowercase_ : Dict = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursive (exponential) solution: explore right/diagonal/down from every cell."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Memoized version of the recursion above."""

    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP with O(cols) memory: keep only the current and the next row."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias, so the next pass reads this row's finished values
        next_row = current_row.copy()
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
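# Worked example of the bottom-up DP above (checked by hand): the largest
# all-ones square in [[1, 1, 0], [1, 1, 1], [0, 1, 1]] has side 2.
#
#   largest_square_area_in_matrix_bottom_up(3, 3, [[1, 1, 0], [1, 1, 1], [0, 1, 1]])  # -> 2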
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=10 , _lowerCAmelCase=3 , _lowerCAmelCase=32 * 4 , _lowerCAmelCase=32 * 6 , _lowerCAmelCase=4 , _lowerCAmelCase=32 , ) -> Optional[int]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = is_training
lowercase = use_auxiliary_loss
lowercase = num_queries
lowercase = num_channels
lowercase = min_size
lowercase = max_size
lowercase = num_labels
lowercase = mask_feature_size
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCAmelCase )
lowercase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCAmelCase )
lowercase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCAmelCase ) > 0.5
).float()
lowercase = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCAmelCase ) > 0.5).long()
lowercase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _a ( self ) -> List[Any]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase , lowercase , lowercase , lowercase , lowercase = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = output.encoder_hidden_states
lowercase = output.pixel_decoder_hidden_states
lowercase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCAmelCase ) , config.decoder_config.decoder_layers )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
lowercase = MaskFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
lowercase = model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase = MaskFormerForInstanceSegmentation(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
def comm_check_on_output(_lowerCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase = model(pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase )
lowercase = model(_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
lowercase = model(
pixel_values=_lowerCAmelCase , pixel_mask=_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
comm_check_on_output(_lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__A = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> int:
'''simple docstring'''
lowercase = MaskFormerModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def _a ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_lowerCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def _a ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def _a ( self ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> int:
'''simple docstring'''
pass
def _a ( self ) -> str:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
@slow
def _a ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase = MaskFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = (self.model_tester.min_size,) * 2
lowercase = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_lowerCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_lowerCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=_lowerCAmelCase ).long(),
}
lowercase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_lowerCAmelCase )
lowercase = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def _a ( self ) -> str:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_lowerCAmelCase , **_lowerCAmelCase , output_hidden_states=_lowerCAmelCase )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase ).to(_lowerCAmelCase )
lowercase = model(**_lowerCAmelCase , output_attentions=_lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def _a ( self ) -> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
lowercase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase ).loss
loss.backward()
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = True
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.train()
lowercase = model(_lowerCAmelCase , mask_labels=_lowerCAmelCase , class_labels=_lowerCAmelCase )
lowercase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we use requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
lowercase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase_ : Dict = 1e-4
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
lowercase = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
lowercase = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
lowercase = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_lowerCAmelCase )
.eval()
)
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# masks_queries_logits
lowercase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
lowercase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
lowercase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(_lowerCAmelCase )
.eval()
)
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# masks_queries_logits
lowercase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
lowercase = torch.tensor(_lowerCAmelCase ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
# class_queries_logits
lowercase = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
lowercase = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCAmelCase , atol=_lowerCAmelCase ) )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_lowerCAmelCase )
.eval()
)
lowercase = self.default_image_processor
lowercase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowercase = inputs["""pixel_values"""].to(_lowerCAmelCase )
lowercase = [el.to(_lowerCAmelCase ) for el in inputs["""mask_labels"""]]
lowercase = [el.to(_lowerCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
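# Outside the assertions above, the same checkpoint runs end to end in a few
# lines. A minimal inference sketch (the post_process_semantic_segmentation
# helper is assumed from recent transformers releases; the function name is
# illustrative):
def _maskformer_inference_sketch():
    processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
    image = prepare_img()
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # per-query class logits (1, num_queries, num_labels + 1) and mask logits
    print(outputs.class_queries_logits.shape, outputs.masks_queries_logits.shape)
    # fold the queries into one semantic map at the original (height, width)
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]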
| 712 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
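# A quick sketch of how the attribute_map above behaves in practice: the
# GPT-2 style names remain the stored fields, while the canonical transformers
# names resolve through the map (a minimal example, assuming the released
# GPTBigCodeConfig that this class mirrors):
def _attribute_map_sketch():
    from transformers import GPTBigCodeConfig

    config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
    assert config.hidden_size == config.n_embd == 768
    assert config.num_hidden_layers == config.n_layer == 12
    assert config.num_attention_heads == config.n_head == 12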
| 653 | 0 |
'''simple docstring'''
import qiskit
def SCREAMING_SNAKE_CASE ( lowercase_ : int = 2 ):
lowercase = qubits
# Using Aer's simulator
lowercase = qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
lowercase = qiskit.QuantumCircuit(lowercase_ , lowercase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowercase_ ):
        # Adding a CX (CNOT) gate between each qubit and its predecessor
        circuit.cx(i - 1 , i )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowercase_ ) ) , list(range(lowercase_ ) ) )
    # Measuring any one qubit now collapses the superposition of the others,
    # leaving them in the same state as the measured qubit.
# Executing the circuit on the simulator
lowercase = qiskit.execute(lowercase_ , lowercase_ , shots=1000 )
return job.result().get_counts(lowercase_ )
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
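# Written out without the obfuscated names, the function above builds an
# n-qubit GHZ state: one Hadamard followed by a chain of CNOTs, so every shot
# lands on |00...0> or |11...1>. A readable restatement (same legacy qiskit
# Aer/execute API as above; the function name is illustrative):
def ghz_counts(qubits: int = 3, shots: int = 1000) -> dict:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(qubits, qubits)
    circuit.h(0)  # put qubit 0 in superposition
    for i in range(1, qubits):
        circuit.cx(i - 1, i)  # entangle each qubit with its predecessor
    circuit.measure(list(range(qubits)), list(range(qubits)))
    job = qiskit.execute(circuit, simulator, shots=shots)
    # e.g. {'000': 5xx, '111': 4xx} — only the two fully correlated outcomes
    return job.result().get_counts(circuit)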
| 713 |
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
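# Incoming webhooks also accept Slack's Block Kit payloads; the helper above
# extends naturally (block structure per Slack's public docs; the URL below is
# a placeholder, not a real endpoint):
def send_rich_slack_message(webhook_url: str = "https://hooks.slack.com/services/T000/B000/XXXX"):
    payload = {
        "text": "Build finished",  # plain-text fallback for notifications
        "blocks": [
            {"type": "section", "text": {"type": "mrkdwn", "text": "*Build finished* :tada:"}},
        ],
    }
    response = requests.post(webhook_url, json=payload, timeout=10)
    response.raise_for_status()  # surface non-2xx statuses, like the check above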
| 653 | 0 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase (_UpperCAmelCase ):
__A = (UnCLIPScheduler,)
def _a ( self , **_lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**_lowerCAmelCase )
return config
def _a ( self ) -> str:
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_lowerCAmelCase , prev_timestep=_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowercase = scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(variance_type="""learned_range""" )
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = 0.5
assert scheduler._get_variance(1 , predicted_variance=_lowerCAmelCase ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_lowerCAmelCase ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_lowerCAmelCase ) - -0.001_0011 < 1E-5
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**_lowerCAmelCase )
lowercase = scheduler.timesteps
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
lowercase = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
lowercase = pred_prev_sample
lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(25 )
lowercase = scheduler.timesteps
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = torch.manual_seed(0 )
for i, t in enumerate(_lowerCAmelCase ):
# 1. predict noise residual
lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
lowercase = None
else:
lowercase = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase = scheduler.step(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , prev_timestep=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
lowercase = pred_prev_sample
lowercase = torch.sum(torch.abs(_lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def _a ( self ) -> Tuple:
'''simple docstring'''
pass
def _a ( self ) -> Optional[int]:
'''simple docstring'''
pass
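# Stripped of test scaffolding, the stepping loops above reduce to a bare
# denoising loop. A minimal sketch (random tensors stand in for a real UNet's
# output; the scheduler API is the same public one the tests exercise):
def _unclip_denoising_sketch():
    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
    scheduler.set_timesteps(25)
    timesteps = scheduler.timesteps
    sample = torch.randn(1, 3, 32, 32)
    generator = torch.manual_seed(0)
    for i, t in enumerate(timesteps):
        noise_pred = torch.randn_like(sample)  # stand-in for model(sample, t)
        prev_t = None if i + 1 == timesteps.shape[0] else timesteps[i + 1]
        sample = scheduler.step(
            noise_pred, t, sample, prev_timestep=prev_t, generator=generator
        ).prev_sample
    return sample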
| 714 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
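# A typical end-to-end run, followed by a smoke test of the dumped folder
# (the script name and local paths are placeholders for wherever the original
# Apple checkpoint lives):
#
#   python convert_mobilevit.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small
def _verify_converted_checkpoint(dump_path: str = "./mobilevit-small"):
    model = MobileViTForImageClassification.from_pretrained(dump_path)
    image_processor = MobileViTImageProcessor.from_pretrained(dump_path)
    inputs = image_processor(images=prepare_img(), return_tensors="pt")
    predicted_class = model(**inputs).logits.argmax(-1).item()
    print(model.config.id2label[predicted_class])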
| 653 | 0 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
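# The defaults above reproduce the base Chinese checkpoint's shape settings;
# instantiating with no arguments is enough to inspect them (a small sketch,
# assuming transformers' public NezhaConfig mirrors this class):
def _nezha_config_sketch():
    from transformers import NezhaConfig

    config = NezhaConfig()
    # NEZHA swaps absolute position embeddings for functional relative
    # encodings, clipped at max_relative_position
    print(config.vocab_size, config.max_position_embeddings, config.max_relative_position)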
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict, so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
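# Outside the harness, the same checkpoint classifies an image in a few lines,
# reusing the ViTImageProcessor pairing the integration test relies on (the
# function name is illustrative):
def _swiftformer_inference_sketch():
    processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
    model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").eval()
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    print(model.config.id2label[logits.argmax(-1).item()])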
| 653 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
lowercase = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
lowercase = tempfile.mkdtemp()
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
lowercase = """hf-internal-testing/ngram-beam-search-decoder"""
def _a ( self , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.get_tokenizer()
lowercase = self.get_feature_extractor()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # make sure decoder kwargs passed to from_pretrained override the defaults
lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = floats_list((3, 1000) )
lowercase = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = """This is a test string"""
lowercase = processor(text=_lowerCAmelCase )
lowercase = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self , _lowerCAmelCase=(2, 10, 16) , _lowerCAmelCase=77 ) -> List[Any]:
'''simple docstring'''
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowercase = processor.decode(_lowerCAmelCase )
lowercase = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowercase = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
lowercase = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
lowercase = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
lowercase = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
lowercase , lowercase , lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
lowercase = 15
lowercase = -20.0
lowercase = -4.0
lowercase = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowercase = decoded_processor_out.text
lowercase = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
lowercase = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowercase = [d[0][0] for d in decoded_decoder_out]
lowercase = [d[0][2] for d in decoded_decoder_out]
lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1E-3 ) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
lowercase = 2.0
lowercase = 5.0
lowercase = -20.0
lowercase = True
lowercase = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
lowercase = decoded_processor_out.text
lowercase = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
lowercase = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowercase = os.listdir(_lowerCAmelCase )
lowercase = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = snapshot_download("""hf-internal-testing/processor_with_lm""" )
lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowercase = os.listdir(_lowerCAmelCase )
lowercase = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = floats_list((3, 1000) )
lowercase = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowercase = self._get_dummy_logits()
lowercase = processor_wavaveca.batch_decode(_lowerCAmelCase )
lowercase = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def _a ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = [d[key] for d in offsets]
return retrieved_list
def _a ( self ) -> str:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = self._get_dummy_logits()[0]
lowercase = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = self._get_dummy_logits()
lowercase = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _a ( self ) -> Optional[int]:
'''simple docstring'''
import torch
lowercase = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
lowercase = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6000 ) )
lowercase = iter(_lowerCAmelCase )
lowercase = next(_lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
lowercase = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowercase = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
lowercase = model(_lowerCAmelCase ).logits.cpu().numpy()
lowercase = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowercase = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
lowercase = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
lowercase = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
lowercase = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
lowercase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
lowercase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
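# Put together, the pieces tested above form the usual LM-boosted ASR loop.
# A condensed sketch of the slow test (assumes `speech` is a 16 kHz mono
# numpy array; Wav2Vec2ForCTC is the released name of the CTC model class):
def _lm_boosted_asr_sketch(speech):
    import torch
    from transformers import AutoProcessor, Wav2Vec2ForCTC

    processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
    model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
    inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits.cpu().numpy()
    # beam-search decoding through pyctcdecode, with per-word offsets
    output = processor.decode(logits[0], output_word_offsets=True)
    return output.text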
| 716 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE ( ):
lowercase = HfArgumentParser(lowercase_ )
lowercase = parser.parse_args_into_dataclasses()[0]
lowercase = TensorFlowBenchmark(args=lowercase_ )
try:
lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] )
lowercase = """"""
lowercase = eval(str(lowercase_ ).split(""" """ )[-1] )
lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase_ )
if len(lowercase_ ) > 0:
lowercase = full_error_msg + begin_error_msg + str(lowercase_ )
raise ValueError(lowercase_ )
benchmark.run()
if __name__ == "__main__":
main()
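# The shim above only rewrites error messages for the retired --no_<flag>
# spelling; a typical invocation uses the current hyphenated flags (the script
# name below is a placeholder, and the flag names assume the stock
# TensorFlowBenchmarkArguments fields):
#
#   python run_tf_benchmark.py \
#       --models bert-base-uncased \
#       --batch_sizes 8 \
#       --sequence_lengths 128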
| 653 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __UpperCamelCase (unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=True , _lowerCAmelCase=1 / 255 , _lowerCAmelCase=True , ) -> Any:
'''simple docstring'''
lowercase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_pad
def _a ( self ) -> int:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=False ) -> int:
'''simple docstring'''
if not batched:
lowercase = image_inputs[0]
if isinstance(_lowerCAmelCase , Image.Image ):
lowercase , lowercase = image.size
else:
lowercase , lowercase = image.shape[1], image.shape[2]
if w < h:
lowercase = int(self.size["""shortest_edge"""] * h / w )
lowercase = self.size["""shortest_edge"""]
elif w > h:
lowercase = self.size["""shortest_edge"""]
lowercase = int(self.size["""shortest_edge"""] * w / h )
else:
lowercase = self.size["""shortest_edge"""]
lowercase = self.size["""shortest_edge"""]
else:
lowercase = []
for image in image_inputs:
lowercase , lowercase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0]
lowercase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
__A = YolosImageProcessor if is_vision_available() else None
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = YolosImageProcessingTester(self )
@property
def _a ( self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _lowerCAmelCase )
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCAmelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def _a ( self ) -> None:
        '''Calling `pad` directly should match calling the processor with only padding enabled.'''
        image_processing_a = self.image_processing_class(**self.image_processor_dict )
        image_processing_b = self.image_processing_class(do_resize=False , do_normalize=False , do_rescale=False )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_a.pad(image_inputs , return_tensors="""pt""" )
        encoded_images = image_processing_b(image_inputs , return_tensors="""pt""" )
        self.assertTrue(
            torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1E-4 ) )
@slow
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowercase = json.loads(f.read() )
lowercase = {"""image_id""": 3_9769, """annotations""": target}
# encode them
lowercase = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowercase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify area
lowercase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCAmelCase ) )
# verify boxes
lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCAmelCase , atol=1E-3 ) )
# verify image_id
lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCAmelCase ) )
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCAmelCase ) )
# verify class_labels
lowercase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCAmelCase ) )
# verify orig_size
lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCAmelCase ) )
# verify size
lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCAmelCase ) )
@slow
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowercase = json.loads(f.read() )
lowercase = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
lowercase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowercase = YolosImageProcessor(format="""coco_panoptic""" )
lowercase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , masks_path=_lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
lowercase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _lowerCAmelCase , atol=1E-4 ) )
# verify area
lowercase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _lowerCAmelCase ) )
# verify boxes
lowercase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _lowerCAmelCase )
lowercase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _lowerCAmelCase , atol=1E-3 ) )
# verify image_id
lowercase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _lowerCAmelCase ) )
# verify is_crowd
lowercase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _lowerCAmelCase ) )
# verify class_labels
lowercase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _lowerCAmelCase ) )
# verify masks
lowercase = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _lowerCAmelCase )
# verify orig_size
lowercase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _lowerCAmelCase ) )
# verify size
lowercase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _lowerCAmelCase ) )
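# --- Illustrative usage sketch (added for context; not part of the original test file) ---
# The aspect-ratio math in `get_expected_values` above mirrors what the processor does at
# inference time: the shortest edge is resized to size["shortest_edge"] while the longest
# edge is capped at size["longest_edge"]. Assuming network access to the "hustvl/yolos-small"
# checkpoint used in the slow tests:
#
#     from transformers import YolosImageProcessor
#     from PIL import Image
#     processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#     inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#     print(inputs.pixel_values.shape)  # torch.Size([1, 3, 800, 1066]), as in the slow test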
| 717 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase_ : List[str] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
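# Example invocation (illustrative; the script name is a placeholder):
#     python print_env.py
# Each optional dependency falls back to printing None when it is not installed.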
| 653 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Dict = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class __UpperCamelCase (PretrainedConfig ):
    model_type = '''rwkv'''
    attribute_map = {'''max_position_embeddings''': '''context_length'''}
    def __init__( self , vocab_size=5_0277 , context_length=1024 , hidden_size=4096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) -> None:
        '''Build an RWKV configuration; unset sizes fall back to values derived from `hidden_size`.'''
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
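# --- Usage sketch (added for illustration; `__UpperCamelCase` is the RWKV config class above) ---
# Unset sizes fall back to derived defaults: `attention_hidden_size` to `hidden_size`
# and `intermediate_size` to 4 * hidden_size.
if __name__ == "__main__":
    demo_config = __UpperCamelCase(hidden_size=512 , num_hidden_layers=6 )
    assert demo_config.attention_hidden_size == 512
    assert demo_config.intermediate_size == 4 * 512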
| 718 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs = None , **kwargs , ) -> None:
        '''Load the character-level SentencePiece model from `vocab_file`.'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        '''Size of the SentencePiece vocabulary.'''
        return self.sp_model.get_piece_size()
    def get_vocab( self ) -> Dict[str, int]:
        '''Return the full token-to-index vocabulary, including added tokens.'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        '''Drop the (unpicklable) SentencePiece processor before pickling.'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> None:
        '''Restore the SentencePiece processor after unpickling.'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text ) -> List[str]:
        '''Split a string into SentencePiece pieces.'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''Map a token (str) to its id using the SentencePiece vocabulary.'''
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ) -> str:
        '''Map an id (int) back to its token using the SentencePiece vocabulary.'''
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ) -> str:
        '''Join a sequence of tokens back into a single string.'''
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''Append the EOS token, concatenating a second sequence first when given.'''
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''Return a mask with 1 for special tokens and 0 for sequence tokens.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + suffix_ones
        return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''Copy (or serialize) the SentencePiece model file into `save_directory`.'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
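# --- Usage sketch (illustrative; requires downloading the referenced checkpoint) ---
#     tokenizer = __UpperCamelCase.from_pretrained("microsoft/speecht5_asr")
#     ids = tokenizer("hello world").input_ids           # character-level pieces + </s>
#     tokenizer.decode(ids, skip_special_tokens=True)    # -> "hello world"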
| 653 | 0 |
'''simple docstring'''
import re
import subprocess
import sys
lowercase_ : Union[str, Any] = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
lowercase_ : Tuple = (
subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split()
)
lowercase_ : Optional[int] = '''|'''.join(sys.argv[1:])
lowercase_ : str = re.compile(rf'''^({joined_dirs}).*?\.py$''')
lowercase_ : str = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
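# Example invocation (illustrative; the script file name is an assumption): pass the
# top-level directories to watch, and the modified .py files under them are printed
# for downstream test selection, e.g.:
#     python get_modified_files.py src tests examples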
| 719 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( ):
    '''Project Euler 40: digits of the Champernowne constant 0.123456789101112...'''
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = """""".join(constant )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[9_9999] )
        * int(constant[99_9999] )
    )
if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE())
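# Quick sanity check (added for illustration): the constant starts "123456789101112...",
# so the 10th digit is the leading "1" of 10.
assert "".join(str(n ) for n in range(1 , 100 ) )[9] == "1"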
| 653 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=False , ) -> Optional[Any]:
'''simple docstring'''
lowercase = size if size is not None else {"""height""": 20, """width""": 20}
lowercase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = min_resolution
lowercase = max_resolution
lowercase = do_resize
lowercase = size
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_normalize
lowercase = image_mean
lowercase = image_std
lowercase = do_reduce_labels
def _a ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    image = Image.open(dataset[0]["""file"""] )
    map = Image.open(dataset[1]["""file"""] )
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
    imagea = Image.open(ds[0]["""file"""] )
    mapa = Image.open(ds[1]["""file"""] )
    imageb = Image.open(ds[2]["""file"""] )
    mapb = Image.open(ds[3]["""file"""] )
    return [imagea, imageb], [mapa, mapb]
@require_torch
@require_vision
class __UpperCamelCase (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
def _a ( self ) -> int:
'''simple docstring'''
lowercase = BeitImageProcessingTester(self )
@property
def _a ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_lowerCAmelCase )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , _lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
pass
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
lowercase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowercase = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
lowercase = []
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowercase = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
lowercase , lowercase = prepare_semantic_single_inputs()
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
lowercase , lowercase = prepare_semantic_batch_inputs()
lowercase = image_processing(_lowerCAmelCase , _lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
    def _a ( self ) -> None:
        '''Labels stay in range, and enabling `do_reduce_labels` remaps the background to 255.'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image , map = prepare_semantic_single_inputs()
        encoding = image_processing(image , map , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 150 )
        image_processing.do_reduce_labels = True
        encoding = image_processing(image , map , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )
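# --- Note on `do_reduce_labels` (added for illustration; not part of the test suite) ---
# ADE20k stores "background" as class 0; reducing labels maps 0 -> 255 (ignored by the
# loss) and shifts every other class id down by one, which the assertions above rely on.
_demo_label = np.array([[0, 1], [150, 2]] )
_demo_reduced = np.where(_demo_label == 0 , 255 , _demo_label - 1 )
assert _demo_reduced.tolist() == [[255, 0], [149, 1]]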
| 720 |
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE ( ):
    '''Project Euler 13: first ten digits of the sum of the numbers in num.txt.'''
    num_file = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
    with open(num_file ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE())
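# num.txt (shipped next to this script in the original project) holds one hundred
# 50-digit numbers, one per line; only the first ten digits of their sum are kept.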
| 653 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class __UpperCamelCase (PretrainedConfig ):
    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=12_8112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ) -> None:
        '''Build an NLLB-MoE configuration; see the attribute assignments below for the fields.'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
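# --- Usage sketch (added for illustration; `__UpperCamelCase` is the NLLB-MoE config above) ---
# The dtype guard in `__init__` only admits "float32", "float16" and "bfloat16".
if __name__ == "__main__":
    try:
        __UpperCamelCase(router_dtype="int8" )
    except ValueError as err:
        print(err )  # `router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got int8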
| 721 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
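# --- Usage sketch (illustrative; needs a CUDA GPU and the checkpoint download) ---
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-base", safety_checker=None).to("cuda")
#     image = pipe("a photo of the dolomites").images[0]   # 512x2048 panorama by default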
| 653 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def SCREAMING_SNAKE_CASE ( t_limit: int = 100_0000 , n_limit: int = 10 ):
    '''Project Euler 174: count tile totals t <= t_limit formed by 1..n_limit laminae.'''
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
    print(f'''{SCREAMING_SNAKE_CASE() = }''')
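# Reasoning sketch (added for illustration): a hollow square lamina with outer width w
# and hole width h uses w*w - h*h tiles, where w and h share parity and 1 <= h <= w - 2.
# The smallest total, t = 8, arises only from (w, h) = (3, 1), so L(8) = 1.
assert 3 * 3 - 1 * 1 == 8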
| 700 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    __A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics( split , metrics , output_dir ):
    '''Log and save the metrics for one split.'''
    logger.info(F"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(F"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , F"""{split}_results.json""" ) )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("""*** Train ***""" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["""train_n_objs"""] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("""train""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(metric_key_prefix="""val""" )
        metrics["""val_n_objs"""] = data_args.n_val
        metrics["""val_loss"""] = round(metrics["""val_loss"""] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics("""val""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info("""*** Predict ***""" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="""test""" )
        metrics = test_output.metrics
        metrics["""test_n_objs"""] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["""test_loss"""] = round(metrics["""test_loss"""] , 4 )
            handle_metrics("""test""" , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
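# Example invocation (illustrative; the model name and paths are placeholders):
#     python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#         --output_dir ./out --do_train --do_eval --predict_with_generate --task summarization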
| 653 | 0 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one( i ):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def SCREAMING_SNAKE_CASE ( ):
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
    lowercase = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lowercase , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lowercase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def SCREAMING_SNAKE_CASE ( num_proc ):
    sa = [1, 2]
    sb = {"""a""": 1, """b""": 2}
    sc = {"""a""": [1, 2], """b""": [3, 4]}
    sd = {"""a""": {"""1""": 1}, """b""": 2}
    se = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
    expected_map_nested_sa = [2, 3]
    expected_map_nested_sb = {"""a""": 2, """b""": 3}
    expected_map_nested_sc = {"""a""": [2, 3], """b""": [4, 5]}
    expected_map_nested_sd = {"""a""": {"""1""": 2}, """b""": 3}
    expected_map_nested_se = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
    with parallel_backend("""spark""" ):
        assert map_nested(add_one , sa , num_proc=num_proc ) == expected_map_nested_sa
        assert map_nested(add_one , sb , num_proc=num_proc ) == expected_map_nested_sb
        assert map_nested(add_one , sc , num_proc=num_proc ) == expected_map_nested_sc
        assert map_nested(add_one , sd , num_proc=num_proc ) == expected_map_nested_sd
        assert map_nested(add_one , se , num_proc=num_proc ) == expected_map_nested_se
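# --- Minimal sketch of the API under test (added for illustration) ---
# `map_nested` applies a picklable function over arbitrarily nested lists/dicts, and the
# `parallel_backend` context only switches how the work is distributed (here assuming a
# joblib-spark backend and a live Spark session):
#
#     with parallel_backend("spark"):
#         map_nested(add_one, {"a": [1, 2]}, num_proc=2)   # -> {"a": [2, 3]}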
| 701 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __UpperCamelCase (Pipeline ):
    def __init__( self , *args , **kwargs ) -> Dict:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ) -> str:
        '''simple docstring'''
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["""prompt"""] = prompt
        if generate_kwargs is not None:
            forward_kwargs["""generate_kwargs"""] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["""generate_kwargs"""] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            forward_kwargs["""generate_kwargs"""]["""max_new_tokens"""] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images , **kwargs ) -> Any:
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ) -> List[str]:
        '''simple docstring'''
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F"""Received an invalid text input, got - {type(prompt )} - but expected a single string. """
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["""input_ids"""] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ) -> Union[str, Any]:
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["""input_ids"""] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
return model_outputs
    def postprocess( self , model_outputs ) -> List[str]:
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
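# A minimal usage sketch (the checkpoint name below is illustrative, not taken from this file):
# once this class is registered under the "image-to-text" task, it is typically driven through
# `transformers.pipeline`:
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#   captioner("parrots.png")  # -> [{"generated_text": ...}]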
| 653 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester (unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ) -> Optional[int]:
        '''simple docstring'''
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ) -> Optional[int]:
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest (ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> Optional[Any]:
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Any:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Any:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """do_flip_channel_order""" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Optional[int]:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
    def test_call_pil( self ) -> Union[str, Any]:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy( self ) -> List[Any]:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch( self ) -> Optional[int]:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class NezhaConfig (PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__( self , vocab_size=2_1128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) -> int:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
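# A short usage sketch (the values are illustrative): constructing the config and serializing it.
#
#   configuration = NezhaConfig(vocab_size=21128, hidden_size=768)
#   configuration.save_pretrained("./nezha-config")  # writes the fields above to config.json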
| 653 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( x_points : list , y_points : list , xa : int ):
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
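    # Example: the sample points below lie on y = x**2, so Neville interpolation at xa = 5 is exact.
    print(SCREAMING_SNAKE_CASE([1, 2, 3, 4, 6], [1, 4, 9, 16, 36], 5)[0])  # -> 25.0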
| 703 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
    }
    with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
        json.dump(repo_infos , f , indent=4 )
def SCREAMING_SNAKE_CASE ( params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["""WORLD_SIZE"""] )
        params.n_gpu_per_node = int(os.environ["""N_GPU_NODE"""] )
        params.global_rank = int(os.environ["""RANK"""] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def SCREAMING_SNAKE_CASE ( args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 704 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token : str ) -> dict[Any, Any]:
    headers = {
        """Authorization""": F"""token {auth_token}""",
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig (PretrainedConfig ):
    model_type = '''vit_msn'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
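# A small usage sketch (values are illustrative):
#
#   configuration = ViTMSNConfig(image_size=224, patch_size=16)
#   print(configuration.hidden_size)  # -> 768 by default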
| 705 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav : np.ndarray , max_length : float , sample_rate : int = 1_6000 ):
    """Randomly sample a chunk of `max_length` seconds from the input waveform."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
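# For example: with sample_rate=16_000 and max_length=1.0, a 48_000-sample waveform is cropped to
# a random contiguous window of 16_000 samples, while shorter clips are returned unchanged.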
@dataclass
class DataTrainingArguments :
    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    eval_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    train_split_name: str = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    eval_split_name: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    audio_column_name: str = field(
        default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    label_column_name: str = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    attention_mask: bool = field(
        default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
    def __post_init__( self ) -> None:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["""eval"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["""train"""].features[data_args.label_column_name].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
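# A typical invocation (dataset and hyper-parameters below are illustrative, not mandated by this
# script):
#
#   python run_audio_classification.py \
#     --model_name_or_path facebook/wav2vec2-base \
#     --dataset_name superb --dataset_config_name ks \
#     --output_dir ./wav2vec2-base-ft-keyword-spotting --do_train --do_eval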
| 653 | 0 |
'''simple docstring'''
def jaro_winkler(stra : str , strb : str ):
    def get_matched_characters(_stra : str , _strb : str ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = F"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
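    # For "hello" vs "world" only the single "l" matches and the strings share no common prefix,
    # so the score printed above comes out to roughly 0.47.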
| 706 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments (BenchmarkArguments ):
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={'''help''': '''Name of TPU'''} , )
    device_idx: int = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    eager_mode: bool = field(default=False , metadata={'''help''': '''Benchmark models in eager model.'''} )
    use_xla: bool = field(
        default=False , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
        } , )
@cached_property
    def _setup_tpu( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
        return strategy
@property
    def is_tpu( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
    def strategy( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
    def gpu_list( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
    def n_gpu( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
    def is_gpu( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
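# A minimal usage sketch (model name and sizes are illustrative): these arguments are consumed by
# `TensorFlowBenchmark` from the same package:
#
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#   results = TensorFlowBenchmark(args).run()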
| 653 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest (unittest.TestCase ):
    def setUp( self ) -> Dict:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        qformer_tokenizer = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ) -> Optional[int]:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ) -> Any:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer( self , **kwargs ) -> Dict:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown( self ) -> Tuple:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ) -> Optional[int]:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ) -> Union[str, Any]:
        '''simple docstring'''
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , BertTokenizerFast )
    def test_image_processor( self ) -> int:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ) -> Union[str, Any]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
    def test_processor( self ) -> Optional[Any]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ) -> Optional[int]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ) -> Tuple:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 707 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig (PretrainedConfig ):
    model_type = '''vit_msn'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 653 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ) -> List[Any]:
        '''simple docstring'''
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ) -> Tuple:
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ) -> List[Any]:
        '''simple docstring'''
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ) -> str:
        '''simple docstring'''
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ) -> List[str]:
        '''simple docstring'''
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ) -> int:
        '''simple docstring'''
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ) -> Optional[int]:
        '''simple docstring'''
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37 )
    def test_config( self ) -> int:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ) -> int:
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ) -> List[str]:
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ) -> Dict:
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ) -> Dict:
        '''simple docstring'''
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_generation( self ) -> Dict:
        '''simple docstring'''
        model_name = """abeja/gpt-neox-japanese-2.7b"""
        prompts = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
        EXPECTED_OUTPUTS = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_name )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_name )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=50 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 708 |
'''simple docstring'''
def apply_table(inp , table ):
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data ):
    return data[1:] + data[0]
def xor(a , b ):
    res = """"""
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s , data ):
    row = int("""0b""" + data[0] + data[-1] , 2 )
    col = int("""0b""" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function(expansion , s0 , s1 , key , message ):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] ) # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = """0""" * (2 - len(l )) + l # noqa: E741
    r = """0""" * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decypting is:''', PT)
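    # Round-trip check: for any 10-bit key and 8-bit binary message, the decrypted text printed
    # here must equal the original message entered above.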
| 653 | 0 |
def SCREAMING_SNAKE_CASE ( density : float , bulk_modulus : float ):
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
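    # Example (rough textbook values for water): bulk modulus ~2.15e9 Pa and density ~1000 kg/m^3
    # give a speed of sound of roughly 1466 m/s.
    print(SCREAMING_SNAKE_CASE(1000, 2.15e9))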
| 709 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def map(dataset : datasets.Dataset , **kwargs ): # noqa: A001 -- intentionally shadows the builtin for the benchmark
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset : datasets.Dataset , **kwargs ): # noqa: A001
    _ = dataset.filter(**kwargs )
def SCREAMING_SNAKE_CASE ( ):
lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowercase = generate_example_dataset(
os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )
lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples["""text"""] )
lowercase = map(lowercase_ )
lowercase = map(lowercase_ , batched=lowercase_ )
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""numpy""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""pandas""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
lowercase = filter(lowercase_ )
        # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , """wb""" ) as f:
f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 653 | 0 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowercase_ : float , lowercase_ : float , lowercase_ : float ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
        lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )  # tf.sub was removed in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
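# Minimal usage sketch (illustrative data; assumes the TF1-style session API used above):
# toy = [array([0.0, 0.0]), array([0.1, 0.1]), array([5.0, 5.0]), array([5.1, 4.9])]
# centroids, assignments = SCREAMING_SNAKE_CASE(toy, 2)  # two well-separated clusters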
| 653 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ : Optional[int] = 16
lowercase_ : Tuple = 32
def SCREAMING_SNAKE_CASE ( lowercase_ : Accelerator , lowercase_ : int = 16 ):
lowercase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowercase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase_ : int ):
# max_length=None => use the model max length (it's actually the default)
lowercase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase_ , max_length=lowercase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase = datasets.map(
lowercase_ , batched=lowercase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase_ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase = 16
elif accelerator.mixed_precision != "no":
lowercase = 8
else:
lowercase = None
return tokenizer.pad(
lowercase_ , padding="""longest""" , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowercase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
lowercase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ : Any = mocked_dataloaders # noqa: F811
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Dict ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase_ ) == "1":
lowercase = 2
# Initialize accelerator
lowercase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase = config["""lr"""]
lowercase = int(config["""num_epochs"""] )
lowercase = int(config["""seed"""] )
lowercase = int(config["""batch_size"""] )
lowercase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
lowercase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase = batch_size // MAX_GPU_BATCH_SIZE
lowercase = MAX_GPU_BATCH_SIZE
set_seed(lowercase_ )
lowercase , lowercase = get_dataloaders(lowercase_ , lowercase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase = model.to(accelerator.device )
# Instantiate optimizer
lowercase = AdamW(params=model.parameters() , lr=lowercase_ )
# Instantiate scheduler
lowercase = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Now we train the model
for epoch in range(lowercase_ ):
model.train()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase = model(**lowercase_ )
lowercase = outputs.loss
lowercase = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
lowercase = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase = model(**lowercase_ )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase , lowercase = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowercase_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
lowercase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase_ , default=lowercase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowercase = parser.parse_args()
lowercase = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
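# This script is normally started through the Accelerate CLI rather than plain Python,
# e.g. (script name assumed): accelerate launch multi_process_metrics.py --mixed_precision fp16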
| 711 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase = update_area_of_max_square(lowercase_ , col + 1 )
lowercase = update_area_of_max_square(row + 1 , col + 1 )
lowercase = update_area_of_max_square(row + 1 , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
return sub_problem_sol
else:
return 0
lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase = [0]
lowercase = [[-1] * cols for _ in range(lowercase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = dp_array[row][col + 1]
lowercase = dp_array[row + 1][col + 1]
lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(dp_array[row][col] , lowercase_ )
else:
lowercase = 0
return largest_square_area
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [0] * (cols + 1)
lowercase = [0] * (cols + 1)
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = current_row[col + 1]
lowercase = next_row[col + 1]
lowercase = next_row[col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(current_row[col] , lowercase_ )
else:
lowercase = 0
lowercase = current_row
return largest_square_area
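# Worked example of the recurrence (hand-checked): for mat = [[1, 1], [1, 1]] the three
# neighbours of cell (0, 0) each contribute 1, so dp(0, 0) = 1 + min(1, 1, 1) = 2.
# Note the helpers return that side length (2 here), despite the "area" naming.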
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
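# Minimal usage sketch (checkpoint name assumed, not pinned by this file):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")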
| 712 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
| 653 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase (metaclass=_UpperCAmelCase ):
__A = ['''torch''', '''torchsde''']
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def _a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
| 713 |
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
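    # The channel URL is the incoming-webhook endpoint, shaped roughly like
    # https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXX (placeholder).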
| 653 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
if not isinstance(lowercase_ , lowercase_ ):
lowercase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(lowercase_ )
if number < 0:
return False
lowercase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
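# Hand-checked examples: 5 -> 25, 6 -> 36 and 76 -> 5776 are automorphic (each square
# ends in the number itself), while 7 -> 49 is not.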
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
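# Example invocation (script name and paths assumed):
# python convert_mobilevit_to_pytorch.py --mobilevit_name mobilevit_s \
#     --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small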
| 653 | 0 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=sys.maxsize ) -> Dict:
'''simple docstring'''
lowercase = """bilinear"""
lowercase = max_size
lowercase = short_edge_length
def __call__( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = []
for img in imgs:
lowercase , lowercase = img.shape[:2]
# later: provide list and randomly choose index for resize
lowercase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowercase = size * 1.0 / min(_lowerCAmelCase , _lowerCAmelCase )
if h < w:
lowercase , lowercase = size, scale * w
else:
lowercase , lowercase = scale * h, size
if max(_lowerCAmelCase , _lowerCAmelCase ) > self.max_size:
lowercase = self.max_size * 1.0 / max(_lowerCAmelCase , _lowerCAmelCase )
lowercase = newh * scale
lowercase = neww * scale
lowercase = int(neww + 0.5 )
lowercase = int(newh + 0.5 )
if img.dtype == np.uinta:
lowercase = Image.fromarray(_lowerCAmelCase )
lowercase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowercase = np.asarray(_lowerCAmelCase )
else:
                lowercase = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
lowercase = nn.functional.interpolate(
_lowerCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_lowerCAmelCase ).squeeze(0 )
img_augs.append(_lowerCAmelCase )
return img_augs
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowercase = cfg.INPUT.FORMAT
lowercase = cfg.SIZE_DIVISIBILITY
lowercase = cfg.PAD_VALUE
lowercase = cfg.INPUT.MAX_SIZE_TEST
lowercase = cfg.MODEL.DEVICE
lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase = lambda _lowerCAmelCase : (x - self.pixel_mean) / self.pixel_std
def _a ( self , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = tuple(max(_lowerCAmelCase ) for s in zip(*[img.shape for img in images] ) )
lowercase = [im.shape[-2:] for im in images]
lowercase = [
nn.functional.pad(
_lowerCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_lowerCAmelCase , _lowerCAmelCase )
]
return torch.stack(_lowerCAmelCase ), torch.tensor(_lowerCAmelCase )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase=False ) -> Dict:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase = [images]
if single_image:
assert len(_lowerCAmelCase ) == 1
for i in range(len(_lowerCAmelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_lowerCAmelCase , images.pop(_lowerCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_lowerCAmelCase , torch.as_tensor(img_tensorize(images.pop(_lowerCAmelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowercase = torch.tensor([im.shape[:2] for im in images] )
lowercase = self.aug(_lowerCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowercase = [self.normalizer(_lowerCAmelCase ) for x in images]
# now pad them to do the following operations
lowercase , lowercase = self.pad(_lowerCAmelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowercase = torch.true_divide(_lowerCAmelCase , _lowerCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Dict ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Tuple[int, int] ):
assert torch.isfinite(lowercase_ ).all(), "Box tensor contains infinite or NaN!"
lowercase , lowercase = box_size
tensor[:, 0].clamp_(min=0 , max=lowercase_ )
tensor[:, 1].clamp_(min=0 , max=lowercase_ )
tensor[:, 2].clamp_(min=0 , max=lowercase_ )
tensor[:, 3].clamp_(min=0 , max=lowercase_ )
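# Minimal usage sketch (illustrative tensor; clamping happens in place):
# boxes = torch.tensor([[-5.0, 10.0, 120.0, 90.0]])
# SCREAMING_SNAKE_CASE(boxes, (100, 100))  # coordinates now lie inside the 100x100 image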
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ) -> int:
'''simple docstring'''
lowercase = parent
lowercase = 13
lowercase = 7
lowercase = True
lowercase = True
lowercase = True
lowercase = True
lowercase = 99
lowercase = 384
lowercase = 2
lowercase = 4
lowercase = 37
lowercase = """gelu"""
lowercase = 0.1
lowercase = 0.1
lowercase = 512
lowercase = 16
lowercase = 2
lowercase = 0.02
lowercase = 3
lowercase = 4
lowercase = 128
lowercase = 2
lowercase = 9
lowercase = 1
lowercase = None
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = TFConvBertModel(config=_lowerCAmelCase )
lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowercase = [input_ids, input_mask]
lowercase = model(_lowerCAmelCase )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowercase = TFConvBertForMaskedLM(config=_lowerCAmelCase )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowercase = self.num_labels
lowercase = TFConvBertForSequenceClassification(config=_lowerCAmelCase )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = self.num_choices
lowercase = TFConvBertForMultipleChoice(config=_lowerCAmelCase )
lowercase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
'''simple docstring'''
lowercase = self.num_labels
lowercase = TFConvBertForTokenClassification(config=_lowerCAmelCase )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = TFConvBertForQuestionAnswering(config=_lowerCAmelCase )
lowercase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A = False
__A = False
__A = False
def _a ( self ) -> int:
'''simple docstring'''
lowercase = TFConvBertModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                # Export to the TF SavedModel format, reload, and check that hidden
                # states and attentions survive the round trip.
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
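    # ConvBERT replaces half of the self-attention heads with span-based dynamic
    # convolution (config.head_ratio defaults to 2), which is why the expected
    # attention shapes in these tests use `num_attention_heads / 2`.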
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
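# A standalone repro of the integration check above -- a sketch, assuming
# `tensorflow` and `transformers` are installed and the YituTech/conv-bert-base
# checkpoint can be downloaded:
#
#   import tensorflow as tf
#   from transformers import TFConvBertModel
#
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   print(output.shape)  # expected: (1, 6, 768)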
| 716 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate the parser error for deprecated `--no_*` flags into a more
        # helpful message pointing at the `--no-*` replacements.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
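# Example invocation -- a sketch only; the exact flag names are defined by
# TensorFlowBenchmarkArguments and may differ between transformers versions:
#
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128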
| 653 | 0 |
'''simple docstring'''
from string import ascii_uppercase
dicta = {char: i for i, char in enumerate(ascii_uppercase)}  # letter -> index
dictb = dict(enumerate(ascii_uppercase))  # index -> letter
def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message (spaces included)."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
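# Example: generate_key("THE GERMAN ATTACK", "SECRET") -> "SECRETSECRETSECRE";
# the key is cycled out to the full message length, spaces included.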
def cipher_text(message: str, key_new: str) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            # Shift each letter backwards by the key letter; spaces are copied
            # through and do not consume a key letter.
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text
def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            # Undo the shift applied by cipher_text.
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt
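# Round-trip property: cipher_text computes (p - k) % 26 and original_text
# computes (c + k) % 26, so original_text(cipher_text(m, key), key) == m for
# any uppercase message m.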
def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 717 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce console output if TensorFlow is imported

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
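# Typical usage: `python print_env.py`. Libraries that are not installed are
# reported as None rather than raising an ImportError.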
| 653 | 0 |