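# =============================================================================
# Snippet 1: ESM / ESMFold model configuration. This appears to correspond to
# the Hugging Face `transformers` module `models/esm/configuration_esm.py`.
# =============================================================================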
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested folding config."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm_head: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Each state dim must be divisible by the corresponding head width, so that
        # the number of attention heads computed below is exact.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """The default ESM-2 vocabulary: special tokens plus amino-acid and gap codes."""
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D",
        "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C",
        "X", "B", "U", "Z", "O", ".", "-",
        "<null_1>", "<mask>",
    )
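# A minimal usage sketch (not part of the original file; shown as comments
# because the module relies on package-relative imports):
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True)
#     assert isinstance(config.esmfold_config, EsmFoldConfig)
#     assert config.vocab_list == get_default_vocab_list()
#     config.esmfold_config.trunk.structure_module.sequence_dim  # 384 by default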
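# =============================================================================
# Snippet 2: Streamlit demo for long-form question answering (ELI5). This
# appears to correspond to the `transformers` research example
# `examples/research_projects/longform-qa/eli5_app.py`.
# =============================================================================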
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
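# =============================================================================
# Snippet 3: utility that shrinks an exported ONNX model by removing
# duplicate initializer (weight) tensors and rewiring node inputs to a
# single shared copy.
# =============================================================================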
"""simple docstring"""
import os
import numpy
import onnx
def lowercase ( _snake_case : Any , _snake_case : List[Any] ) ->int:
"""simple docstring"""
__snake_case : str = a.name
__snake_case : Any = b.name
__snake_case : int = ''''''
__snake_case : Tuple = ''''''
__snake_case : str = a == b
__snake_case : str = name_a
__snake_case : Optional[Any] = name_b
return res
def lowercase ( _snake_case : Dict , _snake_case : List[str] , _snake_case : List[Any] ) ->Optional[int]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_snake_case , _snake_case )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case )
_graph_replace_input_with(node_proto.attribute[1].g , _snake_case , _snake_case )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _snake_case , _snake_case )
def lowercase ( _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : List[Any] ) ->List[Any]:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(_snake_case , _snake_case , _snake_case )
def lowercase ( _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Dict ) ->int:
"""simple docstring"""
__snake_case : int = list(model.graph.initializer )
__snake_case : Union[str, Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__snake_case : List[str] = inits[i].name
__snake_case : str = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _snake_case , _snake_case )
def lowercase ( _snake_case : List[str] ) ->str:
"""simple docstring"""
__snake_case : Union[str, Any] = os.path.dirname(_snake_case )
__snake_case : Optional[Any] = os.path.basename(_snake_case )
__snake_case : Any = onnx.load(os.path.join(_snake_case , _snake_case ) )
__snake_case : str = list(model.graph.initializer )
__snake_case : Tuple = set()
__snake_case : List[str] = {}
__snake_case : Optional[int] = []
__snake_case : Optional[Any] = 0
for i in range(len(_snake_case ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_snake_case ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_snake_case )
dup_set.add(_snake_case )
__snake_case : List[Any] = inits[j].data_type
__snake_case : List[Any] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , _snake_case )
total_reduced_size += mem_size
__snake_case : Optional[Any] = inits[i].name
__snake_case : Dict = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_snake_case )
else:
__snake_case : Union[str, Any] = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )
__snake_case : Any = sorted(_snake_case )
_remove_dup_initializers_from_model(_snake_case , _snake_case , _snake_case )
__snake_case : List[Any] = '''optimized_''' + model_file_name
__snake_case : Any = os.path.join(_snake_case , _snake_case )
onnx.save(_snake_case , _snake_case )
return new_model
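# A minimal usage sketch (the file name below is hypothetical):
#
#     optimized_path = remove_dup_initializers("exports/bart_encoder.onnx")
#     optimized_model = onnx.load(optimized_path)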
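# =============================================================================
# Snippet 4: conversion script that ports original SwiftFormer checkpoints to
# the Hugging Face `transformers` format and verifies the logits.
# =============================================================================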
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Fetch the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
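# Example invocation (script and checkpoint path names are hypothetical):
#
#     python convert_swiftformer.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt ./checkpoints/swiftformer_xs.pth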
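# =============================================================================
# Snippet 5: test suite for Mask2Former, in the style of the `transformers`
# tests (`tests/models/mask2former/test_modeling_mask2former.py`).
# =============================================================================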
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from PIL import Image

    from transformers import Mask2FormerImageProcessor


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
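# =============================================================================
# Snippet 6: UMT5 model configuration and ONNX export configuration, in the
# style of `transformers` (`models/umt5/configuration_umt5.py`).
# =============================================================================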
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
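# A minimal usage sketch (not part of the original file; shown as comments
# because the module relies on package-relative imports):
#
#     config = UMT5Config()                  # google/umt5-small defaults
#     assert config.hidden_size == config.d_model == 512
#     assert config.dense_act_fn == "gelu_new"  # "gated-gelu" is rewritten above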
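# =============================================================================
# Snippet 7: simple contrast adjustment for a PIL image.
# =============================================================================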
from PIL import Image


def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """
    Change the contrast of a PIL image. `level` ranges from -255 (flat gray)
    to +255 (maximum contrast); the scaling factor follows the standard
    contrast-correction formula 259 * (level + 255) / (255 * (259 - level)).
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Scale each channel value around the midpoint 128.
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
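# =============================================================================
# Snippet 8: scraping worldwide COVID-19 statistics from worldometers.info.
# =============================================================================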
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dict of the worldwide COVID-19 statistics (cases, deaths,
    recoveries, ...) scraped from the worldometers page.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
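# =============================================================================
# Snippet 9: conversion script that ports a GPTSAN TensorFlow checkpoint to a
# PyTorch state dict.
# =============================================================================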
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->str:
'''simple docstring'''
a : Union[str, Any] = os.path.join(args.tf_model_dir , "parameters.json" )
a : str = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
a : str = args.output + ".pt"
a : Dict = OrderedDict()
with tf.device("/CPU:0" ):
a : Optional[int] = tf.train.load_checkpoint(args.tf_model_dir )
a : Optional[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
a : Dict = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
a : Union[str, Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
a : Optional[int] = 8
a : Dict = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
a : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/moe" ):
a : List[str] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[str] = torch.tensor(_lowercase )
elif key_name.endswith("/softmlp/kernel" ):
a : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
a : Any = key_name[-9:-7]
for i in range(16 ):
a : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
a : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
a : Dict = torch.tensor(_lowercase )
elif key_name.startswith("model/mlp" ):
a : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
a : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.endswith("/p1/bias" ):
a : str = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
a : List[Any] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.endswith("/p2/kernel" ):
a : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
a : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/p2/bias" ):
a : Dict = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : str = torch.tensor(_lowercase )
elif key_name.startswith("model/ln" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.bias" % player
a : Tuple = vnp.copy() # same because it is one dimensional
a : int = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Optional[Any] = "model.blocks.%d.feed_forward.norm.weight" % player
a : List[str] = vnp.copy() # same because it is one dimensional
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/att" ):
a : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
a : Union[str, Any] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
a : List[str] = state[:, 0, :, :]
a : Dict = state[:, 1, :, :]
a : Union[str, Any] = state[:, 2, :, :]
a : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Dict = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
a : Union[str, Any] = torch.tensor(_lowercase )
a : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
a : List[str] = torch.tensor(_lowercase )
a : Optional[Any] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
a : Optional[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/o/kernel" ):
a : Any = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
a : Optional[int] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
a : Tuple = torch.tensor(_lowercase )
elif key_name.startswith("model/an" ):
a : List[str] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
a : Optional[int] = "model.blocks.%d.self_attn.norm.bias" % player
a : Union[str, Any] = vnp.copy() # same because it is one dimensional
a : List[Any] = torch.tensor(_lowercase )
elif key_name.endswith("/g" ):
a : Any = "model.blocks.%d.self_attn.norm.weight" % player
a : str = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
a : Optional[int] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
a : Tuple = "model.%s.weight" % nlayer
a : Any = vnp.copy() # same in embedded
a : Tuple = torch.tensor(_lowercase )
if key_name.startswith("model/wte" ):
a : Optional[int] = "lm_head.weight"
a : Optional[int] = vnp.copy() # same in embedded
a : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith("model/wob" ):
a : Optional[int] = "final_logits_bias"
a : Optional[Any] = vnp.copy() # same in embedded
a : Optional[int] = state.reshape((1, -1) )
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
a : Optional[int] = "model.last_project.weight"
a : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
a : Dict = "model.last_project.bias"
a : Optional[Any] = vnp.copy() # same because it is one dimensional
a : Any = torch.tensor(_lowercase )
torch.save(_lowercase , args.output )
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
a : Tuple = parser.parse_args()
convert_tf_gptsan_to_pt(args)
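# =============================================================================
# Snippet 10: TensorFlow test suite for CvT, in the style of the
# `transformers` tests (`tests/models/cvt/test_modeling_tf_cvt.py`);
# truncated mid-method in the source.
# =============================================================================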
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , 'embed_dim' ) )
self.parent.assertTrue(hasattr(__a , 'num_heads' ) )
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : Optional[int] = image_size
__a : List[str] = patch_sizes
__a : str = patch_stride
__a : Any = patch_padding
__a : Dict = is_training
__a : Union[str, Any] = use_labels
__a : Dict = num_labels
__a : List[Any] = num_channels
__a : Any = embed_dim
__a : int = num_heads
__a : Optional[int] = stride_kv
__a : Dict = depth
__a : List[str] = cls_token
__a : List[Any] = attention_drop_rate
__a : Tuple = initializer_range
__a : int = layer_norm_eps
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
# create a random int32 tensor of given shape
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : str = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
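    # Worked example with the defaults above (image_size=64):
    #   stage 0: floor((64 + 2*2 - 7) / 4 + 1) = 16
    #   stage 1: floor((16 + 2*1 - 3) / 2 + 1) = 8
    #   stage 2: floor(( 8 + 2*1 - 3) / 2 + 1) = 4
    # so the last hidden state checked above is a 4x4 map with embed_dim[-1] = 96 channels.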
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason='Cvt does not output attentions')
    def test_attention_outputs(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds')
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings')
    def test_model_common_attributes(self):
        '''simple docstring'''
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0, reason='TF does not support backprop for grouped convolutions on CPU.', )
    def test_dataset_conversion(self):
        '''simple docstring'''
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0, reason='TF does not support backprop for grouped convolutions on CPU.', )
    @slow
    def test_keras_fit(self):
        '''simple docstring'''
        super().test_keras_fit()

    @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8')
    def test_keras_fit_mixed_precision(self):
        '''simple docstring'''
        policy = tf.keras.mixed_precision.Policy('mixed_float16')
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('float32')
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 27 | 0 |
"""simple docstring"""
import d4rl  # noqa: registers the D4RL offline-RL environments with gym
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}
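# Roughly, these knobs trade sample quality for speed: `n_samples` candidate plans are
# drawn per environment step, each `horizon` states long; `n_guide_steps` gradient
# updates per denoising step nudge samples toward high predicted value (0 disables the
# value network, as noted above), and `scale` sets the strength of that guidance.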
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = '''hopper-medium-v2'''
__UpperCamelCase : Optional[int] = gym.make(env_name)
__UpperCamelCase : List[Any] = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
__UpperCamelCase : List[Any] = env.reset()
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : Any = 0
__UpperCamelCase : Any = 1_0_0_0
__UpperCamelCase : Dict = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__UpperCamelCase : Optional[int] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[Any] = env.step(denorm_actions)
__UpperCamelCase : Optional[Any] = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__UpperCamelCase : str = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 106 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
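# Note on MAPPING: keys are fairseq parameter-name prefixes and values their HF
# counterparts; a '*' in the value is a per-layer wildcard that gets replaced with the
# encoder layer index parsed out of the fairseq name (see
# recursively_load_weights_wavaveca below). TOP_LEVEL_KEYS lists heads that live at
# the top of the HF model rather than under the encoder.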
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 27 | 0 |
from math import sqrt
def __magic_name__ ( A : int ):
'''simple docstring'''
a = 0
for i in range(1, int(sqrt(A ) + 1 ) ):
if n % i == 0 and i != sqrt(A ):
total += i + n // i
elif i == sqrt(A ):
total += i
return total - n
def __magic_name__ ( A : int = 10000 ):
'''simple docstring'''
a = sum(
i
for i in range(1, A )
if sum_of_divisors(sum_of_divisors(A ) ) == i and sum_of_divisors(A ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 107 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """print in a way that avoids interleaved output from concurrent processes"""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
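# Example: printflock(f"{gpu} is OK"); holding an exclusive flock on this source file
# keeps prints from concurrent ranks from interleaving mid-line.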
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()

gpu = f'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
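    # a successful all_reduce followed by a barrier proves every rank can both
    # exchange tensors and synchronize over NCCL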
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 27 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self, vocab_size=50_400, n_positions=2_048, n_embd=4_096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, tie_word_embeddings=False, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    def __init__(
        self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        """simple docstring"""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
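    # Illustrative shape of the mapping returned above when use_past=True (the exact
    # past_key_values entry names come from fill_with_past_key_values_):
    #   {"input_ids": {0: "batch", 1: "sequence"},
    #    "past_key_values.0.key": {0: "batch", 2: "past_sequence + sequence"},
    #    ...,
    #    "attention_mask": {0: "batch", 1: "past_sequence + sequence"}}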
    @property
    def num_layers(self) -> int:
        """simple docstring"""
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self._config.n_head

    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        """simple docstring"""
        return 13
| 108 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
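# `_import_structure` maps submodule names to their public symbols; the `_LazyModule`
# built at the bottom of this file uses it to defer the heavy torch/TF imports until
# one of these attributes is actually accessed.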
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class UnetaDModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    @property
    def output_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            """block_out_channels""": (32, 64),
            """down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
            """up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
            """attention_head_dim""": 3,
            """out_channels""": 3,
            """in_channels""": 3,
            """layers_per_block""": 2,
            """sample_size""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
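    # ModelTesterMixin drives its shared tests through the hooks above: `dummy_input`
    # feeds forward passes, `input_shape`/`output_shape` are checked against the model
    # outputs, and `prepare_init_args_and_inputs_for_common` supplies constructor kwargs.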
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self):
        '''simple docstring'''
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        '''simple docstring'''
        return (4, 32, 32)

    @property
    def output_shape(self):
        '''simple docstring'''
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            """sample_size""": 32,
            """in_channels""": 4,
            """out_channels""": 4,
            """layers_per_block""": 2,
            """block_out_channels""": (32, 64),
            """attention_head_dim""": 32,
            """down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
            """up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        '''simple docstring'''
        model, loading_info = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["""missing_keys"""]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def test_from_pretrained_accelerate(self):
        '''simple docstring'''
        model, _ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
    def test_from_pretrained_accelerate_wont_change_results(self):
        '''simple docstring'''
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["""sample"""]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNetaDModel.from_pretrained(
            """fusing/unet-ldm-dummy-update""", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["""sample"""]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1E-3)
    def test_output_pretrained(self):
        '''simple docstring'''
        model = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = 'sample'

    @property
    def dummy_input(self, sizes=(32, 32)):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    @property
    def output_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            """block_out_channels""": [32, 64, 64, 64],
            """in_channels""": 3,
            """layers_per_block""": 1,
            """out_channels""": 3,
            """time_embedding_type""": """fourier""",
            """norm_eps""": 1E-6,
            """mid_block_scale_factor""": math.sqrt(2.0),
            """norm_num_groups""": None,
            """down_block_types""": [
                """SkipDownBlock2D""",
                """AttnSkipDownBlock2D""",
                """SkipDownBlock2D""",
                """SkipDownBlock2D""",
            ],
            """up_block_types""": [
                """SkipUpBlock2D""",
                """SkipUpBlock2D""",
                """AttnSkipUpBlock2D""",
                """SkipUpBlock2D""",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
@slow
    def test_from_pretrained_hub(self):
        '''simple docstring'''
        model, loading_info = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["""missing_keys"""]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["""sample"""] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"
@slow
    def test_output_pretrained_ve_mid(self):
        '''simple docstring'''
        model = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))
    def test_output_pretrained_ve_large(self):
        '''simple docstring'''
        model = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))

    def test_forward_with_norm_groups(self):
        '''simple docstring'''
        # not required for this model
        pass
| 109 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
    @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
    def test_load_metric(self, metric_name):
        '''simple docstring'''
        doctest.ELLIPSIS_MARKER = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        '''simple docstring'''
        doctest.ELLIPSIS_MARKER = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        '''simple docstring'''
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        '''simple docstring'''
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('metrics', metric_name), *args, **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        '''simple docstring'''
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv', '', '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            '''simple docstring'''
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf') as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('comet')
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                '''simple docstring'''
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    with patch('comet.download_model') as mock_download_model:
        mock_download_model.return_value = None
        # mock load_from_checkpoint which is supposed to load the downloaded model
        with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics', 'seqeval'))
    wrong_scheme = 'ERROR'
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 27 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        """simple docstring"""
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('''Trainer: calibration requires an calib_dataset.''')
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description='''Calibration''')
        return DataLoader(
            calib_dataset, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=True, )
    def calibrate(self, calib_dataset=None):
        """simple docstring"""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info('''***** Running calibration *****''')
        logger.info(f'  Num examples = {self.calib_num}')
        logger.info(f'  Batch size = {calib_dataloader.batch_size}')

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
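    # Post-training-quantization flow: configure_model() inserts the quantizer modules,
    # enable_calibration() switches them to observe-only mode, the forward passes above
    # collect activation statistics, and finish_calibration() turns those statistics
    # into the final quantization ranges (see the local quant_trainer module).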
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'{metric_key_prefix}_'):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, '''predict''')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'{metric_key_prefix}_'):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        """simple docstring"""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info('''Converting model to be onnx compatible''')
        from pytorch_quantization.nn import TensorQuantizer

        # export quantizers as explicit QuantizeLinear/DequantizeLinear pairs in the ONNX graph
        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, '''module''') else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, '''model.onnx''')
        logger.info(f'exporting model to {output_model_file}')

        axes = {0: '''batch_size''', 1: '''seq_len'''}
        torch.onnx.export(
            model_to_save, input_tuple, output_model_file, export_params=True, opset_version=13, do_constant_folding=True, input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''], output_names=['''output_start_logits''', '''output_end_logits'''], dynamic_axes={
                '''input_ids''': axes,
                '''attention_mask''': axes,
                '''token_type_ids''': axes,
                '''output_start_logits''': axes,
                '''output_end_logits''': axes,
            }, verbose=True, )
        logger.info('''onnx export finished''')
| 110 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }), reference_urls=[], )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def __magic_name__ ( __snake_case : Dict ) -> List[Any]:
lowercase : Optional[int] = tmp_path_factory.mktemp("data" ) / (FILE_PATH + '.zstd')
lowercase : Optional[int] = bytes(_SCREAMING_SNAKE_CASE , "utf-8" )
with zstd.open(_SCREAMING_SNAKE_CASE , "wb" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def __magic_name__ ( __snake_case : int ) -> Tuple:
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , "w" ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def __magic_name__ ( __snake_case : int , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> Optional[int]:
lowercase : Optional[int] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
lowercase : Optional[int] = input_paths[compression_format]
lowercase : Any = tmp_path / 'cache'
lowercase : int = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
lowercase : str = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
lowercase : int = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
lowercase : Union[str, Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def __magic_name__ ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : int , __snake_case : str , __snake_case : List[Any] ) -> int:
lowercase : Optional[int] = 'custom_cache'
lowercase : str = 'custom_extracted_dir'
lowercase : Optional[int] = tmp_path / 'custom_extracted_path'
if default_extracted:
lowercase : str = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_SCREAMING_SNAKE_CASE ) )
lowercase : Union[str, Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowercase : Optional[int] = xz_file
lowercase : Dict = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
lowercase : int = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Optional[int]:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( __snake_case : Any ) -> int:
lowercase : Tuple = tmp_path_factory.mktemp("data" ) / 'file.html'
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get("https://huggingface.co" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( __snake_case : Optional[Any] ) -> int:
lowercase : Optional[Any] = tmp_path_factory.mktemp("data" ) / 'file.html'
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get("ftp://huggingface.co" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( __snake_case : Dict ) -> Optional[int]:
lowercase : Union[str, Any] = tmp_path_factory.mktemp("data" ) / 'file.html'
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get("s3://huggingface.co" , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head("s3://huggingface.co" )
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
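# This module follows the hubconf.py layout consumed by torch.hub, so the
# wrappers above are looked up by name; a minimal sketch, assuming the
# repository is published as "huggingface/transformers" (the checkpoint name
# is illustrative):
#
#     import torch
#
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     bert = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
#     print(bert.config.hidden_size)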
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the English language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253,
            "e": 0.11162, "f": 0.02228, "g": 0.02015, "h": 0.06094,
            "i": 0.07546, "j": 0.00153, "k": 0.01292, "l": 0.04025,
            "m": 0.02406, "n": 0.06749, "o": 0.07507, "p": 0.01929,
            "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150,
            "y": 0.01994, "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
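# A quick usage sketch of the function above; the ciphertext is "secret message"
# Caesar-shifted by 3. On inputs this short the chi-squared heuristic can
# occasionally prefer a wrong shift, so longer texts are more reliable:
#
#     shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("vhfuhw phvvdjh")
#     print(shift, decoded)  # expected: 3 secret message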
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_parallelizable_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
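# For reference, a minimal standalone sketch mirroring the first integration
# test above; the checkpoint is the one the tests exercise and is fetched from
# the Hub on first use:
#
#     import torch
#     from transformers import MraModel
#
#     model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
#     input_ids = torch.arange(256).unsqueeze(0)  # toy input, as in the test
#     with torch.no_grad():
#         hidden_states = model(input_ids).last_hidden_state
#     print(hidden_states.shape)  # torch.Size([1, 256, 768])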
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
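# These tests drive the Csv builder internals (_generate_tables) directly;
# application code normally reaches the same machinery through load_dataset.
# A minimal sketch with a placeholder file name:
#
#     from datasets import ClassLabel, Features, Value, load_dataset
#
#     # "reviews.csv" is a hypothetical file with "text" and "label" columns.
#     features = Features({"text": Value("string"), "label": ClassLabel(names=["good", "bad"])})
#     dataset = load_dataset("csv", data_files="reviews.csv", features=features, split="train")
#     print(dataset.features["label"].int2str(dataset[0]["label"]))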
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    SpeechaTextaConfig as Speech2Text2Config,
    SpeechaTextaForCausalLM as Speech2Text2ForCausalLM,
    SpeechaTextaTokenizer as Speech2Text2Tokenizer,
    WavaVecaConfig as Wav2Vec2Config,
    WavaVecaFeatureExtractor as Wav2Vec2FeatureExtractor,
    WavaVecaModel as Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
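# For reference, a sketch of driving the conversion programmatically instead
# of through the CLI; the checkpoint and dictionary paths are placeholders,
# while the config paths and sizes are the parser defaults above:
#
#     convert_wav2vec2_checkpoint(
#         "checkpoints/wav2vec2_s2t2.pt",       # placeholder fairseq checkpoint
#         "converted/wav2vec2-2-speech2text2",  # placeholder output folder
#         "checkpoints/dict.ltr.txt",           # placeholder fairseq dictionary
#         encoder_config_path="facebook/wav2vec2-large-lv60",
#         decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
#         vocab_size=10224,
#         num_decoder_layers=7,
#     )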
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
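# Once sys.modules[__name__] holds the lazy module, attribute access triggers
# the real import on first use; a minimal sketch, assuming a standard
# `transformers` installation:
#
#     from transformers.models.blenderbot import BlenderbotConfig
#
#     config = BlenderbotConfig()  # resolved lazily from configuration_blenderbot
#     print(config.model_type)     # "blenderbot"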
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> str:
"""simple docstring"""
snake_case = tmp_path / 'file.csv'
snake_case = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCAmelCase__ ( _UpperCamelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
snake_case = tmp_path / 'malformed_file.csv'
snake_case = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] ) -> Any:
"""simple docstring"""
snake_case = tmp_path / 'csv_with_image.csv'
snake_case = textwrap.dedent(
f"""\
image
{image_file}
""" )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] ) -> int:
"""simple docstring"""
snake_case = tmp_path / 'csv_with_label.csv'
snake_case = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case = tmp_path / 'csv_with_int_list.csv'
snake_case = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : List[str] ) -> Tuple:
"""simple docstring"""
snake_case = Csv()
snake_case = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_SCREAMING_SNAKE_CASE , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message
for record in caplog.records )
@require_pil
def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
snake_case = f.read().splitlines()[1]
snake_case = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
snake_case = csv._generate_tables([[csv_file_with_image]] )
snake_case = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
snake_case = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCAmelCase__ ( _UpperCamelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
snake_case = f.read().splitlines()[1:]
snake_case = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
snake_case = csv._generate_tables([[csv_file_with_label]] )
snake_case = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
snake_case = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(_SCREAMING_SNAKE_CASE ) for label in labels]
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] ) -> Any:
"""simple docstring"""
snake_case = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda _UpperCamelCase : [int(_SCREAMING_SNAKE_CASE ) for i in x.split()]} )
snake_case = csv._generate_tables([[csv_file_with_int_list]] )
snake_case = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
snake_case = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 150 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
def __UpperCAmelCase ( self , __a , __a , __a=False ):
'''simple docstring'''
__a : str = copy.deepcopy(__a )
if model_class in get_values(__a ):
__a : str = {
k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__a , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a ):
                __a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__a ):
                __a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                __a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__a ):
                __a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__a ):
                __a : Union[str, Any] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = TFLayoutLMvaModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
if getattr(__a , 'hf_compute_loss' , __a ):
# The number of elements in the loss should be the same as the number of elements in the label
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0]
]
__a : Dict = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : Dict = prepared_for_class.pop('input_ids' )
__a : Tuple = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__a : Union[str, Any] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__a : List[Any] = -100
__a : List[str] = tf.convert_to_tensor(__a )
__a : Any = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = model(__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
# Get keys that were added with the _prepare_for_class function
__a : Dict = prepared_for_class.keys() - inputs_dict.keys()
__a : Any = inspect.signature(model.call ).parameters
__a : str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__a : List[Any] = {0: 'input_ids'}
for label_key in label_keys:
__a : List[Any] = signature_names.index(__a )
__a : Union[str, Any] = label_key
__a : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__a : Union[str, Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__a : Optional[Any] = prepared_for_class[value]
__a : str = tuple(__a )
# Send to model
__a : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__a : Tuple = self.default_image_processor
__a : List[Any] = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values
__a : Union[str, Any] = tf.constant([[1, 2]] )
__a : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a )
# verify the logits
__a : List[Any] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , __a )
__a : Optional[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
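
# Inference sketch mirroring the integration test above (the checkpoint name and the
# expected output shape are taken from the test itself; `bbox` and `pixel_values` are
# the illustrative tensors built in the test body):
#
#   model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
#   outputs = model(input_ids=tf.constant([[1, 2]]), bbox=bbox, pixel_values=pixel_values)
#   print(outputs.last_hidden_state.shape)  # (1, 199, 768)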
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the paths from the top-left to the bottom-right corner of `grid`,
    moving up/down/left/right without revisiting a cell; cells equal to 1 are blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
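
# Quick usage sketch: a 2x2 grid of zeros has exactly two simple paths from the
# top-left to the bottom-right corner:
#
#   grid = [[0, 0], [0, 0]]
#   assert depth_first_search(grid, 0, 0, set()) == 2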
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the test module for a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of some test class."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the (possibly None) model tester class attached to a test class."""
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map model classes to the test classes that cover them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map model classes to the model tester classes that cover them."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make objects (e.g. classes) JSON-serializable by using their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
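
# Usage sketch (run from the root of the `transformers` repo; the BERT test file
# path below is a hypothetical example):
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(get_module_path(test_file))  # tests.models.bert.test_modeling_bert
#   print(to_json(get_model_to_tester_mapping(test_file)))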
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key, e.g. `0.0.mlp.fc1` with `block.0.0.output.conv1`, shifting the block index by `offset`."""
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find) - 2] )
    layer_num = int(key_list[key_list.index(to_find) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(f'{orig_block_num}.{layer_num}.{original_name}', f'block.{new_block_num}.{layer_num}.{new_name}' )
    return key
def rename_keys(state_dict):
    """Rename the original PoolFormer checkpoint keys to the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''', '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace, f'patch_embeddings.{total_embed_found}.' )
            key = key.replace('''proj''', '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc1''', '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc2''', '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm1''', '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm2''', '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_1''', '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_2''', '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''', '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepare an image of two cats that we will verify the results on."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into the HF PoolFormer structure."""
_a = PoolFormerConfig()
# set attributes based on model_name
_a = 'huggingface/label-files'
_a = model_name[-3:]
_a = 10_00
_a = 'imagenet-1k-id2label.json'
_a = (1, 10_00)
# set config attributes
_a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, repo_type='''dataset''' ), '''r''' ) )
_a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
if size == "s12":
_a = [2, 2, 6, 2]
_a = [64, 1_28, 3_20, 5_12]
_a = 4.0
_a = 0.9
elif size == "s24":
_a = [4, 4, 12, 4]
_a = [64, 1_28, 3_20, 5_12]
_a = 4.0
_a = 0.9
elif size == "s36":
_a = [6, 6, 18, 6]
_a = [64, 1_28, 3_20, 5_12]
_a = 4.0
_a = 1e-6
_a = 0.9
elif size == "m36":
_a = [6, 6, 18, 6]
_a = [96, 1_92, 3_84, 7_68]
_a = 4.0
_a = 1e-6
_a = 0.9_5
elif size == "m48":
_a = [8, 8, 24, 8]
_a = [96, 1_92, 3_84, 7_68]
_a = 4.0
_a = 1e-6
_a = 0.9_5
else:
raise ValueError(f'Size {size} not supported' )
# load image processor
_a = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
# Prepare image
_a = prepare_img()
_a = image_processor(images=_SCREAMING_SNAKE_CASE, return_tensors='''pt''' ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
_a = torch.load(_SCREAMING_SNAKE_CASE, map_location=torch.device('''cpu''' ) )
# rename keys
_a = rename_keys(_SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
_a = PoolFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
_a = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
_a = image_processor(images=prepare_img(), return_tensors='''pt''' ).pixel_values
# forward pass
_a = model(_SCREAMING_SNAKE_CASE )
_a = outputs.logits
# define expected logit slices for different models
if size == "s12":
_a = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
_a = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
_a = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
_a = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
_a = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(f'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3], _SCREAMING_SNAKE_CASE, atol=1e-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__snake_case = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
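
# A quick sketch of the block-renaming scheme implemented by `replace_key_with_offset`:
#
#   replace_key_with_offset("poolformer.encoder.0.0.mlp.fc1.weight", 0, "mlp.fc1", "output.conv1")
#   # -> "poolformer.encoder.block.0.0.output.conv1.weight"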
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = StableDiffusionInpaintPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
__a : str = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__a : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__a : Dict = CLIPTextModel(__a )
__a : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : Tuple = Image.fromarray(np.uint8(__a ) ).convert('RGB' ).resize((64, 64) )
        __a : Tuple = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(__a ).startswith('mps' ):
__a : Any = torch.manual_seed(__a )
else:
__a : str = torch.Generator(device=__a ).manual_seed(__a )
__a : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a : str = self.get_dummy_components()
__a : Union[str, Any] = StableDiffusionInpaintPipeline(**__a )
__a : List[Any] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__a : List[Any] = self.get_dummy_inputs(__a )
__a : Dict = sd_pipe(**__a ).images
__a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : List[Any] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
__a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting'
__a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : Tuple = torch.manual_seed(0 )
__a : int = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
__a : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
__a : str = 'stabilityai/stable-diffusion-2-inpainting'
__a : List[str] = StableDiffusionInpaintPipeline.from_pretrained(
            __a , torch_dtype=torch.float16 , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Union[str, Any] = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : int = torch.manual_seed(0 )
__a : Optional[Any] = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
__a : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : str = 'stabilityai/stable-diffusion-2-inpainting'
__a : Any = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' )
__a : str = StableDiffusionInpaintPipeline.from_pretrained(
            __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.float16 , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : Tuple = torch.manual_seed(0 )
__a : str = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , )
__a : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
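
# Minimal usage sketch of the pipeline exercised above (model id taken from the slow
# tests; `init_image` and `mask_image` are PIL images you supply yourself):
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting")
#   result = pipe(prompt="Face of a yellow cat", image=init_image, mask_image=mask_image, output_type="np")
#   image = result.images[0]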
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase: List[Any] = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
"""simple docstring"""
__A = ["pixel_values"]
def __init__(self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = True , lowerCamelCase_ = 1 / 255 , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = True , **lowerCamelCase_ , ):
"""simple docstring"""
super().__init__(**__a )
a = size if size is not None else {'height': 384, 'width': 384}
a = get_size_dict(__a , default_to_square=__a )
a = do_resize
a = size
a = resample
a = do_rescale
a = rescale_factor
a = do_normalize
a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
a = image_std if image_std is not None else OPENAI_CLIP_STD
a = do_convert_rgb
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PILImageResampling.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ):
"""simple docstring"""
a = get_size_dict(__a , default_to_square=__a )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
a = (size['height'], size['width'])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ):
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ):
"""simple docstring"""
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ):
"""simple docstring"""
a = do_resize if do_resize is not None else self.do_resize
a = resample if resample is not None else self.resample
a = do_rescale if do_rescale is not None else self.do_rescale
a = rescale_factor if rescale_factor is not None else self.rescale_factor
a = do_normalize if do_normalize is not None else self.do_normalize
a = image_mean if image_mean is not None else self.image_mean
a = image_std if image_std is not None else self.image_std
a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
a = size if size is not None else self.size
a = get_size_dict(__a , default_to_square=__a )
a = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
a = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
a = [to_numpy_array(__a ) for image in images]
if do_resize:
a = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_rescale:
a = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
a = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
a = [to_channel_dimension_format(__a , __a ) for image in images]
a = BatchFeature(data={"pixel_values": images} , tensor_type=__a )
return encoded_outputs
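
# Usage sketch, assuming the class above corresponds to transformers' BlipImageProcessor
# (which shares these 384x384 / OPENAI_CLIP defaults):
#
#   import numpy as np
#   processor = BlipImageProcessor()
#   dummy = (np.random.rand(480, 640, 3) * 255).astype("uint8")
#   batch = processor.preprocess(images=dummy, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 384, 384)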
'''simple docstring'''
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
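
# Note: `params=locals()` forwards each function's arguments directly as the HTTP
# query string, so the parameter names (`q`, `lat`, `lon`, `appid`) must match the
# OpenWeatherMap query parameters exactly, e.g.:
#
#   current_weather(q="Berlin", appid="<your-appid>")
#   # GET https://api.openweathermap.org/data/2.5/weather?q=Berlin&appid=<your-appid>
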
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad (or truncate) each sequence to `sequence_length`, filling with `padding_value`."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        tensor = tensor[:sequence_length]  # truncate to the target length first
        if padding_side == "right":
            out_tensor[i, : len(tensor)] = tensor
        else:
            out_tensor[i, sequence_length - len(tensor) :] = tensor
    return out_tensor.tolist()
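
# Quick sketch of `padding_tensor` on toy data (right padding with -1 up to length 4):
#
#   padding_tensor([[1, 2], [3, 4, 5]], -1, "right", 4)
#   # -> [[1, 2, -1, -1], [3, 4, 5, -1]]
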
def is_punctuation(char: str) -> bool:
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P' ):
        return True
    return False
@dataclass
class A__ ( lowerCAmelCase_ ):
lowercase = 42
lowercase = True
lowercase = None
lowercase = None
lowercase = -100
lowercase = 'pt'
def _lowerCamelCase ( self : Optional[int] , a : str ):
'''simple docstring'''
import torch
lowerCAmelCase__ : List[str] = 'label' if 'label' in features[0].keys() else 'labels'
lowerCAmelCase__ : str = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowerCAmelCase__ : List[str] = self.tokenizer.pad(
__a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
lowerCAmelCase__ : Optional[Any] = torch.tensor(batch['entity_ids'] ).shape[1]
lowerCAmelCase__ : Dict = self.tokenizer.padding_side
if padding_side == "right":
lowerCAmelCase__ : Any = [
list(__a ) + [self.label_pad_token_id] * (sequence_length - len(__a )) for label in labels
]
else:
lowerCAmelCase__ : str = [
[self.label_pad_token_id] * (sequence_length - len(__a )) + list(__a ) for label in labels
]
lowerCAmelCase__ : int = [feature['ner_tags'] for feature in features]
lowerCAmelCase__ : str = padding_tensor(__a , -1 , __a , __a )
lowerCAmelCase__ : List[Any] = [feature['original_entity_spans'] for feature in features]
lowerCAmelCase__ : Optional[Any] = padding_tensor(__a , (-1, -1) , __a , __a )
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
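
# Collator usage sketch, assuming the dataclass above corresponds to the LUKE example's
# `DataCollatorForLukeTokenClassification` (`tokenizer` is any HF tokenizer; features
# come from a LUKE-style NER dataset with `ner_tags` and `original_entity_spans`):
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer, padding="longest")
#   batch = collator(features)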
'''simple docstring'''
import torch
from transformers import AutoModel
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self , __a="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(__a , self ).__init__()
__a : Tuple = AutoModel.from_pretrained(__a , return_dict=__a )
__a : int = torch.nn.CosineSimilarity(3 , 1E-0_8 )
__a : Union[str, Any] = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return self.bert(**__a ).last_hidden_state
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=__a )
def __UpperCAmelCase ( self , __a , __a , __a=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(__a , __a ) )
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : str = W_supports['sizes'].tolist()
__a : Union[str, Any] = W_supports['start_token_id'].item()
__a : Any = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
__a : Tuple = self.BERT(**__a )
__a : str = self.BERT(**__a )
__a : Any = None
__a : Dict = None
__a : Dict = W_supports['input_ids'] == start_token_id
__a : Union[str, Any] = W_supports['input_ids'] == end_token_id
for i, size in enumerate(__a ):
if i == 0:
__a : Optional[int] = 0
else:
__a : Union[str, Any] = support_sizes[i - 1]
__a : int = S[s : s + size][start_token_masks[s : s + size]]
__a : Union[str, Any] = S[s : s + size][end_token_masks[s : s + size]]
__a : Tuple = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
__a : Dict = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
__a : str = torch.vstack((p_starts, p_start) )
__a : str = torch.vstack((p_ends, p_end) )
else:
__a : List[str] = p_start
__a : int = p_end
return p_starts, p_ends
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
lowerCamelCase = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCAmelCase__ ( _A : Optional[int] , _A : Any , _A : Any , _A : Dict , _A : Dict , _A : Any ):
'''simple docstring'''
for attribute in key.split('''.''' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
a__ ='lm_head'
a__ =getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__ =getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__ =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
a__ =value
elif weight_type == "weight_g":
a__ =value
elif weight_type == "weight_v":
a__ =value
elif weight_type == "bias":
a__ =value
else:
a__ =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCAmelCase__ ( _A : int , _A : int , _A : List[str] ):
'''simple docstring'''
a__ =[]
a__ =fairseq_model.state_dict()
a__ =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
a__ =False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == '''group''' , )
a__ =True
else:
for key, mapped_key in MAPPING.items():
a__ ='unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
a__ =True
if "*" in mapped_key:
a__ =name.split(_SCREAMING_SNAKE_CASE )[0].split('''.''' )[-2]
a__ =mapped_key.replace('''*''' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__ ='weight_g'
elif "weight_v" in name:
a__ ='weight_v'
elif "bias" in name:
a__ ='bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ ='weight'
else:
a__ =None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCAmelCase__ ( _A : Tuple , _A : Any , _A : List[Any] , _A : Any , _A : List[Any] ):
'''simple docstring'''
a__ =full_name.split('''conv_layers.''' )[-1]
a__ =name.split('''.''' )
a__ =int(items[0] )
a__ =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
a__ =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
a__ =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
a__ =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
a__ =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def UpperCAmelCase__ ( _A : Union[str, Any] , _A : List[str] , _A : Optional[Any]=None , _A : List[Any]=None , _A : List[str]=True ):
'''simple docstring'''
if config_path is not None:
a__ =UniSpeechConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__ =UniSpeechConfig()
if is_finetuned:
if dict_path:
a__ =Dictionary.load_from_json(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a__ =target_dict.pad_index
a__ =target_dict.bos_index
a__ =target_dict.eos_index
a__ =len(target_dict.symbols )
a__ =os.path.join(_SCREAMING_SNAKE_CASE , '''vocab.json''' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
a__ =target_dict.indices
# fairseq has the <pad> and <s> switched
a__ =42
a__ =43
with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__ =WavaVecaPhonemeCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_SCREAMING_SNAKE_CASE , )
a__ =True if config.feat_extract_norm == 'layer' else False
a__ =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
a__ =WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
a__ =UniSpeechForCTC(_SCREAMING_SNAKE_CASE )
else:
a__ =UniSpeechForPreTraining(_SCREAMING_SNAKE_CASE )
if is_finetuned:
a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
a__ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
a__ =model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_unispeech.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
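
# Example invocation (the script name and paths below are placeholders):
#
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --dict_path /path/to/dict.ltr.txt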
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return the reduced sum of x + y + z as a (numerator, denominator) tuple."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
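
# Quick check of `add_three`: 1/2 + 1/3 + 1/6 == 1, so the reduced (top, bottom) is (1, 1):
#
#   assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)
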
def solution(order: int = 35) -> int:
    """Sum the unique reduced fractions z that complete x^n + y^n = z^n for n in {1, 2, -1, -2}."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {
'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type='preactivation',
        hidden_act='relu',
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F'''Padding strategy {global_padding} not supported''')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ['stem'] + [F'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
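
# Usage sketch:
#
#   config = BitConfig(layer_type="bottleneck", out_features=["stage4"])
#   print(config.hidden_sizes)  # [256, 512, 1024, 2048]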
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
__a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
__a : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )
# using the PNDM scheduler by default
__a : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a )
__a : Tuple = 'A red cat sitting on a park bench'
__a : int = np.random.RandomState(0 )
__a : Tuple = pipe(
prompt=__a , image=__a , mask_image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__a , output_type='np' , )
__a : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a__ ( lowerCAmelCase_, unittest.TestCase ):
__lowerCAmelCase = KandinskyImgaImgPipeline
__lowerCAmelCase = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
__lowerCAmelCase = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
__lowerCAmelCase = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase = False
@property
def __magic_name__ ( self ):
return 32
@property
def __magic_name__ ( self ):
return 32
@property
def __magic_name__ ( self ):
return self.time_input_dim
@property
def __magic_name__ ( self ):
return self.time_input_dim * 4
@property
def __magic_name__ ( self ):
return 100
@property
def __magic_name__ ( self ):
lowercase : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : Tuple = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
lowercase : Any = MultilingualCLIP(__a )
lowercase : int = text_encoder.eval()
return text_encoder
@property
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : Dict = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowercase : str = UNetaDConditionModel(**__a )
return model
@property
def __magic_name__ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ ( self ):
torch.manual_seed(0 )
lowercase : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
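
# A rough, illustrative sketch (not part of the test suite) of the kind of
# check a helper like ``assert_mean_pixel_difference`` performs: bound the
# average absolute pixel error between a generated and a reference image.
# The threshold value is an assumption for illustration only.
def _mean_pixel_difference_sketch(image, expected_image, threshold=10):
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < threshold, f"Error image deviates {avg_diff} pixels on average"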
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    # Convert bytes to megabytes (2**20 bytes per MB).
    return int(x / 2**20)
# Context manager that tracks the delta and peak of CUDA memory allocations.
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from __future__ import annotations
from math import gcd
def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
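
# Illustrative example (not part of the CLI below): factoring the classic
# semiprime 8051 = 83 * 97. Success is probabilistic, so a ``None`` return
# simply means every attempt failed for the chosen seed and step values.
def _pollard_rho_example() -> None:
    factor = pollard_rho(8051)
    if factor is not None:
        assert 8051 % factor == 0
        print(f"found factor {factor}, cofactor {8051 // factor}")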
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, indices = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in indices[0]]
    return nn_examples
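
# Self-contained sketch (with made-up vectors) of the max-inner-product search
# that backs the dense retrieval above; only the dimensionality (128) mirrors
# the real index.
def _faiss_ip_demo():
    reps = np.random.RandomState(0).standard_normal((1000, 128)).astype("float32")
    index = faiss.IndexFlatIP(128)
    index.add(reps)
    scores, indices = index.search(reps[:1], 5)  # top-5 neighbors of the first vector
    print(indices[0], scores[0])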
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class="img-container"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)
    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)
    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)
    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
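
# Condensed sketch of the three DiffEdit stages exercised above (prompts and
# strengths copied from the tests; everything else is illustrative only):
# 1) contrast source/target prompts to get an edit mask, 2) DDIM-invert the
# image into editable latents, 3) denoise inside the mask toward the target.
def _diffedit_sketch(pipe, image, generator):
    mask_image = pipe.generate_mask(
        image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears", generator=generator
    )
    inv_latents = pipe.invert(
        prompt="a bowl of fruit", image=image, inpaint_strength=0.7, generator=generator
    ).latents
    return pipe(
        prompt="a bowl of pears",
        mask_image=mask_image,
        image_latents=inv_latents,
        generator=generator,
        inpaint_strength=0.7,
        output_type="numpy",
    ).images[0]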
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
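
# Quick illustration of the mapping above on a toy key (the key itself is made
# up, but follows the original checkpoint's naming scheme); it should print
# [('network.0.1.dwconv.weight', 'swiftformer.encoder.network.0.blocks.1.depth_wise_conv.weight')]
def _rename_demo():
    sample_state_dict = {"network.0.1.dwconv.weight": None}
    print(create_rename_keys(sample_state_dict))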
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
components = self.get_dummy_components()
audioldm_pipe = AudioLDMPipeline(**components )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
prompt = ['hey']
output = audioldm_pipe(prompt , num_inference_steps=1 )
audio_shape = output.audios.shape
assert audio_shape == (1, 256)
config = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
audioldm_pipe.vocoder = SpeechTaHifiGan(config ).to(torch_device )
output = audioldm_pipe(prompt , num_inference_steps=1 )
audio_shape = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
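# e.g. model_in_dim 8 -> 16 doubles the mel bins the vocoder consumes, but
# SpeechT5HifiGan upsamples along the time axis, so the waveform length
# asserted above stays 256 samples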
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=__a )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a )
@slow
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs ( self , device , generator_device="cpu" , dtype=torch.floataa , seed=0 ):
    generator = torch.Generator(device=generator_device ).manual_seed(seed )
    latents = np.random.RandomState(seed ).standard_normal((1, 8, 128, 16) )
    latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
    inputs = {
        'prompt': 'A hammer hitting a wooden surface',
        'latents': latents,
        'generator': generator,
        'num_inference_steps': 3,
        'guidance_scale': 2.5,
    }
    return inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
inputs['num_inference_steps'] = 25
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 81920
audio_slice = audio[77230:77240]
expected_slice = np.array(
    [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def SCREAMING_SNAKE_CASE_ ( self : int ):
audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
audioldm_pipe = audioldm_pipe.to(torch_device )
audioldm_pipe.set_progress_bar_config(disable=None )
inputs = self.get_inputs(torch_device )
audio = audioldm_pipe(**inputs ).audios[0]
assert audio.ndim == 1
assert len(audio ) == 81920
audio_slice = audio[27780:27790]
expected_slice = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
max_diff = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
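# Hedged standalone sketch (names assumed, not part of the tests above): the
# dummy pipeline couples duration and sample count through the vocoder
# sampling rate, which is why 0.016 s at a 16 kHz dummy vocoder yields exactly
# the 256 samples asserted earlier.
vocoder_sampling_rate = 16_000
audio_length_in_s = 0.016
expected_samples = int(audio_length_in_s * vocoder_sampling_rate)
assert expected_samples == 256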
| 239 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "umt5"
A_ = ["past_key_values"]
def __init__( self , __a=25_0112 , __a=512 , __a=64 , __a=1024 , __a=8 , __a=None , __a=6 , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=1.0 , __a="gated-gelu" , __a=True , __a=True , __a="T5Tokenizer" , __a=True , __a=0 , __a=1 , __a=0 , **__a , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
__a : Any = vocab_size
__a : Any = d_model
__a : str = d_kv
__a : Dict = d_ff
__a : Union[str, Any] = num_layers
__a : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a : Optional[int] = num_heads
__a : Tuple = relative_attention_num_buckets
__a : Optional[Any] = relative_attention_max_distance
__a : Optional[int] = dropout_rate
__a : List[Any] = layer_norm_epsilon
__a : int = initializer_factor
__a : Union[str, Any] = feed_forward_proj
__a : Any = use_cache
__a : List[Any] = self.feed_forward_proj.split('-' )
__a : Dict = act_info[-1]
__a : Dict = act_info[0] == 'gated'
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__a : Optional[int] = 'gelu_new'
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.d_model
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_heads
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_layers
class __UpperCamelCase ( lowerCAmelCase_ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__a : Dict = 'past_encoder_sequence + sequence'
__a : Tuple = {0: 'batch'}
__a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
__a : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 13
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 5E-4
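# Minimal standalone sketch of the `feed_forward_proj` parsing above
# (function name is illustrative, not part of the class):
def _parse_feed_forward_proj(feed_forward_proj: str):
    act_info = feed_forward_proj.split('-')
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == 'gated'
    if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:
        raise ValueError(f'`feed_forward_proj`: {feed_forward_proj} is not valid.')
    if feed_forward_proj == 'gated-gelu':
        dense_act_fn = 'gelu_new'  # special case kept from the config above
    return dense_act_fn, is_gated_act

assert _parse_feed_forward_proj('gated-gelu') == ('gelu_new', True)
assert _parse_feed_forward_proj('relu') == ('relu', False)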
| 27 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class lowerCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
_lowerCAmelCase : str = """roberta-prelayernorm"""
def __init__( self , lowerCAmelCase=5_02_65 , lowerCAmelCase=7_68 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=30_72 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class lowerCAmelCase_ ( lowerCAmelCase_ ):
"""simple docstring"""
@property
def snake_case ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
    dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
    dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
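# Hedged sketch (toy model, hypothetical file name): how dynamic axes like the
# OrderedDict above are typically handed to torch.onnx.export so that batch and
# sequence dimensions stay symbolic in the exported graph.
import torch
import torch.nn as nn

class TinyModel(nn.Module):
    def forward(self, input_ids, attention_mask):
        return input_ids * attention_mask

dummy = (torch.ones(1, 4, dtype=torch.long), torch.ones(1, 4, dtype=torch.long))
torch.onnx.export(
    TinyModel(), dummy, 'tiny.onnx',
    input_names=['input_ids', 'attention_mask'],
    dynamic_axes={
        'input_ids': {0: 'batch', 1: 'sequence'},
        'attention_mask': {0: 'batch', 1: 'sequence'},
    },
)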
| 150 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats (url: str = "https://www.worldometers.info/coronavirus" ):
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    keys = soup.findAll('h1' )
    values = soup.findAll('div' , {'class': 'maincounter-number'} )
    keys += soup.findAll('span' , {'class': 'panel-title'} )
    values += soup.findAll('div' , {'class': 'number-table-main'} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
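# Hedged offline sketch (made-up markup, no network): the same findAll + zip
# pattern used above, on an inline HTML string.
sample_soup = BeautifulSoup("<h1>Cases:</h1><div class='maincounter-number'>42</div>", 'html.parser')
sample_stats = {
    k.text.strip(): v.text.strip()
    for k, v in zip(sample_soup.findAll('h1'), sample_soup.findAll('div', {'class': 'maincounter-number'}))
}
assert sample_stats == {'Cases:': '42'}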
| 27 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self , __magic_name__ , __magic_name__=1_3 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__=False , __magic_name__=False , __magic_name__=2 , __magic_name__=9_9 , __magic_name__=0 , __magic_name__=3_2 , __magic_name__=5 , __magic_name__=4 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=2 , __magic_name__=4 , __magic_name__="last" , __magic_name__=True , __magic_name__=None , __magic_name__=0 , ):
lowerCamelCase : List[str] = parent
lowerCamelCase : int = batch_size
lowerCamelCase : str = seq_length
lowerCamelCase : Optional[int] = is_training
lowerCamelCase : Any = use_input_lengths
lowerCamelCase : Optional[int] = use_token_type_ids
lowerCamelCase : int = use_labels
lowerCamelCase : Dict = gelu_activation
lowerCamelCase : int = sinusoidal_embeddings
lowerCamelCase : str = causal
lowerCamelCase : Union[str, Any] = asm
lowerCamelCase : Optional[Any] = n_langs
lowerCamelCase : Union[str, Any] = vocab_size
lowerCamelCase : List[str] = n_special
lowerCamelCase : Tuple = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Dict = num_attention_heads
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Tuple = max_position_embeddings
lowerCamelCase : Optional[Any] = type_sequence_label_size
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Tuple = num_labels
lowerCamelCase : List[str] = num_choices
lowerCamelCase : str = summary_type
lowerCamelCase : int = use_proj
lowerCamelCase : Dict = scope
lowerCamelCase : Optional[int] = bos_token_id
def UpperCamelCase__ ( self ):
    input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
    input_mask = random_attention_mask([self.batch_size, self.seq_length] )
    input_lengths = None
    if self.use_input_lengths:
        input_lengths = (
            ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
        )  # small variation of seq_length
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
    sequence_labels = None
    token_labels = None
    is_impossible_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
        choice_labels = ids_tensor([self.batch_size] , self.num_choices )
    config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase__ ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Any = XLMModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a , lengths=__a , langs=__a )
lowerCamelCase : Union[str, Any] = model(__a , langs=__a )
lowerCamelCase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Optional[Any] = XLMWithLMHeadModel(__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[str] = XLMForQuestionAnsweringSimple(__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a )
lowerCamelCase : Optional[int] = model(__a , start_positions=__a , end_positions=__a )
lowerCamelCase : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : Any = XLMForQuestionAnswering(__a )
model.to(__a )
model.eval()
lowerCamelCase : Dict = model(__a )
lowerCamelCase : Optional[Any] = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , p_mask=__a , )
lowerCamelCase : List[Any] = model(
__a , start_positions=__a , end_positions=__a , cls_index=__a , is_impossible=__a , )
(lowerCamelCase ) : Union[str, Any] = result_with_labels.to_tuple()
lowerCamelCase : Union[str, Any] = model(__a , start_positions=__a , end_positions=__a )
(lowerCamelCase ) : Dict = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[Any] = XLMForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a )
lowerCamelCase : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : List[Any] = self.num_labels
lowerCamelCase : Any = XLMForTokenClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ):
lowerCamelCase : str = self.num_choices
lowerCamelCase : Union[str, Any] = XLMForMultipleChoice(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : str = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    choice_labels,
    input_mask,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase):
_UpperCAmelCase : Optional[Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : int = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__=False ):
lowerCamelCase : Optional[Any] = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowerCamelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
lowerCamelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def UpperCamelCase__ ( self ):
lowerCamelCase : List[Any] = XLMModelTester(self )
lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__a , emb_dim=3_7 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*__a )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*__a )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*__a )
def UpperCamelCase__ ( self ):
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*__a )
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*__a )
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*__a )
def UpperCamelCase__ ( self ):
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*__a )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__=1 ):
self.assertIsInstance(__a , __a )
self.assertListEqual(
[isinstance(__a , __a ) for iter_attentions in attentions] , [True] * len(__a ) )
self.assertEqual(len(__a ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(__a ):
# adds PAD dummy token
lowerCamelCase : Optional[Any] = min_length + idx + 1
lowerCamelCase : List[str] = min_length + idx + 1
lowerCamelCase : List[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__a ) )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , __magic_name__=1 ):
self.assertIsInstance(__a , __a )
self.assertListEqual(
[isinstance(__a , __a ) for iter_hidden_states in hidden_states] , [True] * len(__a ) , )
self.assertEqual(len(__a ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(__a ):
# adds PAD dummy token
lowerCamelCase : List[str] = min_length + idx + 1
lowerCamelCase : Dict = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__a ) , )
pass
@slow
def UpperCamelCase__ ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Optional[Any] = XLMModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
class A__ ( unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
lowerCamelCase : List[str] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(__a )
lowerCamelCase : Union[str, Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=__a ) # the president
lowerCamelCase : int = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowerCamelCase : Optional[Any] = model.generate(__a , do_sample=__a )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __a )
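# Worked example of the shape bookkeeping in the two generation checks above
# (assumed values): at step idx the model attends over tgt_len = src_len
# tokens, and attentions/hidden states are stacked over batch * num_beam_groups.
batch_size, num_beam_groups, num_heads = 2, 1, 4
min_length, idx = 5, 0
tgt_len = src_len = min_length + idx + 1
expected_attention_shape = (batch_size * num_beam_groups, num_heads, tgt_len, src_len)
assert expected_attention_shape == (2, 4, 6, 6)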
| 287 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , 'embed_dim' ) )
self.parent.assertTrue(hasattr(__a , 'num_heads' ) )
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : Optional[int] = image_size
__a : List[str] = patch_sizes
__a : str = patch_stride
__a : Any = patch_padding
__a : Dict = is_training
__a : Union[str, Any] = use_labels
__a : Dict = num_labels
__a : List[Any] = num_channels
__a : Any = embed_dim
__a : int = num_heads
__a : Optional[int] = stride_kv
__a : Dict = depth
__a : List[str] = cls_token
__a : List[Any] = attention_drop_rate
__a : Tuple = initializer_range
__a : int = layer_norm_eps
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
# create a random int32 tensor of given shape
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : str = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = TFCvtModel(config=__a )
__a : Dict = model(__a , training=__a )
image_size = (self.image_size, self.image_size)
height , width = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = self.num_labels
__a : Optional[int] = TFCvtForImageClassification(__a )
__a : Dict = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A_ = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtModelTester(self )
__a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(__a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : List[str] = model_class(__a )
__a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__a : Any = outputs.hidden_states
__a : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[Any] = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = TFCvtModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img ():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a : Tuple = self.default_image_processor
__a : Any = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' )
# forward pass
__a : Any = model(**__a )
# verify the logits
__a : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
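# Worked example of the patch-embedding output-size formula used by the model
# tester above: out = floor((size + 2 * padding - kernel) / stride + 1).
from math import floor
height, padding, kernel, stride = 64, 2, 7, 4
out = floor((height + 2 * padding - kernel) / stride + 1)
assert out == 16  # a 64x64 input with the first stage's 7/4/2 patching gives 16x16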
| 27 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 320 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
__lowercase : Optional[Any] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively (hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca (fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif name.split('.' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer (full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb (emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict (dict_path ):
    with open(dict_path , 'r' , encoding='utf-8' ) as f:
        lines = f.readlines()
    words = [line.split(' ' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint (checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('embed_out' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) , 'w' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
__lowercase : Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
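# Minimal standalone sketch (toy lines, no file I/O) of the vocab construction
# in create_vocab_dict above: fairseq dict entries map to ids 4..N+3 after the
# four reserved special tokens.
sample_lines = ['hello 123', 'world 99']
sample_words = [line.split(' ')[0] for line in sample_lines]
sample_vocab = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
sample_vocab.update(dict(zip(sample_words, range(4, len(sample_words) + 4))))
assert sample_vocab['hello'] == 4 and sample_vocab['world'] == 5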
| 27 | 0 |
import numpy as np
import qiskit
def bbaa( key_len : int = 8 , seed : int | None = None ) -> str:
    """simple docstring"""
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="BB84" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = ''.join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , "0" )
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
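# Classical sketch of the sifting step above (no qiskit needed): only positions
# where Alice's and Bob's random bases agree (about half of them) survive into
# the raw key.
rng_demo = np.random.default_rng(0)
alice_b = rng_demo.integers(2, size=24)
bob_b = rng_demo.integers(2, size=24)
bits = rng_demo.integers(2, size=24)
sifted = ''.join(str(bit) for a_b, b_b, bit in zip(alice_b, bob_b, bits) if a_b == b_b)
print(f"kept {len(sifted)} of 24 raw bits: {sifted}")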
| 227 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock (*args ):
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*args )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
__lowercase : Dict = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__lowercase : Tuple = torch.device('cuda', local_rank)
__lowercase : Optional[int] = socket.gethostname()
__lowercase : List[str] = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__lowercase : str = dist.get_rank()
__lowercase : Union[str, Any] = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
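# Hedged single-process sanity check of the same all_reduce call (gloo backend,
# CPU, world_size=1), runnable without a launcher; with SUM each rank
# contributes a 1, so every rank ends up holding world_size. Kept commented so
# it does not clash with the nccl process group initialized above.
#
# import os, torch, torch.distributed as dist
# os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
# os.environ.setdefault('MASTER_PORT', '29500')
# dist.init_process_group('gloo', rank=0, world_size=1)
# t = torch.ones(1)
# dist.all_reduce(t, op=dist.ReduceOp.SUM)
# assert t.item() == 1.0  # sum over world_size ranks, here just one
# dist.destroy_process_group()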
| 27 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class A__ ( lowerCAmelCase_ ):
lowercase = 'markuplm'
def __init__( self : List[str] , a : Optional[Any]=30_522 , a : int=768 , a : Tuple=12 , a : Union[str, Any]=12 , a : List[str]=3_072 , a : int="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : Tuple=512 , a : Dict=2 , a : Tuple=0.0_2 , a : Dict=1E-12 , a : Dict=0 , a : Union[str, Any]=0 , a : List[str]=2 , a : Any=256 , a : int=1_024 , a : Dict=216 , a : Union[str, Any]=1_001 , a : Tuple=32 , a : List[Any]=50 , a : Optional[int]="absolute" , a : Union[str, Any]=True , a : List[Any]=None , **a : Optional[int] , ):
'''simple docstring'''
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Dict = max_position_embeddings
lowerCAmelCase__ : Optional[Any] = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : Optional[Any] = layer_norm_eps
lowerCAmelCase__ : Optional[Any] = position_embedding_type
lowerCAmelCase__ : Optional[int] = use_cache
lowerCAmelCase__ : Optional[Any] = classifier_dropout
# additional properties
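# (an xpath such as /html/body/div[2] is split into per-depth (tag, subscript)
# units, truncated at max_depth and padded with tag_pad_id/subs_pad_id; each
# unit is embedded into xpath_unit_hidden_size dimensions)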
lowerCAmelCase__ : int = max_depth
lowerCAmelCase__ : Union[str, Any] = max_xpath_tag_unit_embeddings
lowerCAmelCase__ : str = max_xpath_subs_unit_embeddings
lowerCAmelCase__ : Optional[Any] = tag_pad_id
lowerCAmelCase__ : Dict = subs_pad_id
lowerCAmelCase__ : List[str] = xpath_unit_hidden_size
| 212 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : str = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__lowercase : Tuple = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__lowercase : Dict = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__lowercase : Optional[Any] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
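# Hedged toy sketch (PEP 562 module __getattr__, not the actual _LazyModule
# implementation): attributes listed in an import structure are resolved to
# their real module only on first access.
import importlib

_toy_import_structure = {'math': ['sqrt']}

def __getattr__(name):
    for module_name, names in _toy_import_structure.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)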
| 27 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = '▁'
lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCamelCase = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
lowerCamelCase = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
lowerCamelCase = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class __magic_name__ ( lowerCAmelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[int] = ['input_ids', 'attention_mask']
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Optional[int] = []
def __init__( self, lowercase_, lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=None, lowercase_=None, lowercase_=None, lowercase_ = None, lowercase_=None, **lowercase_, ) -> List[str]:
"""simple docstring"""
a__ =AddedToken(__a, lstrip=__a, rstrip=__a ) if isinstance(__a, __a ) else mask_token
a__ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, cls_token=__a, pad_token=__a, mask_token=__a, tokenizer_file=__a, src_lang=__a, tgt_lang=__a, additional_special_tokens=__a, sp_model_kwargs=self.sp_model_kwargs, **__a, )
a__ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
a__ =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
a__ ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a__ =1
a__ =len(self.sp_model )
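# language codes are mapped to ids just past the spm vocab: sp_model_size + i + fairseq_offset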
a__ ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__a )
}
a__ ={v: k for k, v in self.lang_code_to_id.items()}
a__ =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
a__ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
a__ =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
a__ =src_lang if src_lang is not None else 'en_XX'
a__ =self.lang_code_to_id[self._src_lang]
a__ =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> List[str]:
"""simple docstring"""
a__ =self.__dict__.copy()
a__ =None
a__ =self.sp_model.serialized_model_proto()
return state
def __setstate__( self, lowercase_ ) -> Any:
"""simple docstring"""
a__ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
a__ ={}
a__ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self, lowercase_ ) -> Dict:
"""simple docstring"""
a__ =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None, lowercase_ = False ) -> Any:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a )
a__ =[1] * len(self.prefix_tokens )
a__ =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None ) -> Optional[Any]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None ) -> List[Any]:
"""simple docstring"""
a__ =[self.sep_token_id]
a__ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCAmelCase ( self, lowercase_, lowercase_, lowercase_, lowercase_, **lowercase_ ) -> Optional[int]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
a__ =src_lang
a__ =self(__a, add_special_tokens=__a, return_tensors=__a, **__a )
a__ =self.convert_tokens_to_ids(__a )
a__ =tgt_lang_id
return inputs
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
a__ ={self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCAmelCase ( self, lowercase_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(__a, out_type=__a )
def _UpperCAmelCase ( self, lowercase_ ) -> Optional[int]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a__ =self.sp_model.PieceToId(__a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCAmelCase ( self, lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCAmelCase ( self, lowercase_ ) -> Tuple:
"""simple docstring"""
a__ =''.join(__a ).replace(__a, ''' ''' ).strip()
return out_string
def _UpperCAmelCase ( self, lowercase_, lowercase_ = None ) -> Optional[int]:
"""simple docstring"""
if not os.path.isdir(__a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a__ =os.path.join(
__a, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a, '''wb''' ) as fi:
a__ =self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def _UpperCAmelCase ( self, lowercase_, lowercase_ = "en_XX", lowercase_ = None, lowercase_ = "ro_RO", **lowercase_, ) -> List[str]:
"""simple docstring"""
a__ =src_lang
a__ =tgt_lang
return super().prepare_seqaseq_batch(__a, __a, **__a )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCAmelCase ( self, lowercase_ ) -> List[str]:
"""simple docstring"""
a__ =self.lang_code_to_id[src_lang]
a__ =[]
a__ =[self.eos_token_id, self.cur_lang_code]
def _UpperCAmelCase ( self, lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
a__ =self.lang_code_to_id[lang]
a__ =[]
a__ =[self.eos_token_id, self.cur_lang_code]
| 188 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__lowercase : Tuple = pytest.mark.integration
__lowercase : Optional[int] = {'comet'}
__lowercase : List[str] = importlib.util.find_spec('fairseq') is not None
__lowercase : str = {'code_eval'}
__lowercase : List[Any] = os.name == 'nt'
__lowercase : Optional[Any] = {'bertscore', 'frugalscore', 'perplexity'}
__lowercase : Optional[Any] = importlib.util.find_spec('transformers') is not None
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
@wraps(_SCREAMING_SNAKE_CASE )
def wrapper(self : int , _SCREAMING_SNAKE_CASE : List[Any] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self , _SCREAMING_SNAKE_CASE )
return wrapper
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
@wraps(_SCREAMING_SNAKE_CASE )
def wrapper(self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self , _SCREAMING_SNAKE_CASE )
return wrapper
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
@wraps(_SCREAMING_SNAKE_CASE )
def wrapper(self : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self , _SCREAMING_SNAKE_CASE )
return wrapper
def lowerCamelCase ():
__a : List[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@local
class __UpperCamelCase ( parameterized.TestCase ):
A_ = {}
A_ = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : int = '[...]'
__a : Tuple = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path )
__a : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=__a )
# check parameters
__a : Dict = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__a , metric_module.__name__ ):
with self.use_local_metrics():
try:
__a : str = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
__a : Tuple = '[...]'
__a : Optional[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics' , __a ) ).module_path )
# run doctest
with self.use_local_metrics():
__a : List[Any] = doctest.testmod(__a , verbose=__a , raise_on_error=__a )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
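# Replace the metric's expensive calls (model downloads / forward passes) with the lightweight mock registered for it.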
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__a ):
yield
else:
yield
@contextmanager
def __UpperCAmelCase ( self ):
'''simple docstring'''
def load_local_metric(__a , *__a , **__a ):
return load_metric(os.path.join('metrics' , __a ) , *__a , **__a )
with patch('datasets.load_metric' ) as mock_load_metric:
__a : Dict = load_local_metric
yield
@classmethod
def __UpperCAmelCase ( cls , __a ):
'''simple docstring'''
def wrapper(__a ):
__a : Optional[Any] = contextmanager(__a )
__a : str = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
assert len(input_dict['input_ids'] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
__a : Dict = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
import torch
def bert_cos_score_idf(_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_SCREAMING_SNAKE_CASE ) )
# mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
__a : str = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
def load_from_checkpoint(_SCREAMING_SNAKE_CASE : Optional[int] ):
class __UpperCamelCase :
def __UpperCAmelCase ( self , __a , *__a , **__a ):
'''simple docstring'''
assert len(__a ) == 2
__a : Dict = [0.19, 0.92]
return scores, sum(__a ) / len(__a )
return Model()
# mock download_model which is supposed to download a comet model
# mock load_from_checkpoint which is supposed to load a comet checkpoint
with patch('comet.download_model' ) as mock_download_model:
__a : str = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
__a : int = load_from_checkpoint
yield
def lowerCamelCase ():
__a : Optional[Any] = load_metric(os.path.join('metrics' , 'seqeval' ) )
__a : List[str] = 'ERROR'
__a : List[str] = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(_SCREAMING_SNAKE_CASE , match=re.escape(_SCREAMING_SNAKE_CASE ) ):
metric.compute(predictions=[] , references=[] , scheme=_SCREAMING_SNAKE_CASE )
| 27 | 0 |
from collections import defaultdict
class lowercase :
def __init__( self , snake_case , snake_case ):
snake_case_ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
snake_case_ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(__a ) )
]
snake_case_ = defaultdict(__a ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
snake_case_ = (1 << len(__a )) - 1
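# e.g. for 3 persons, final_mask = 0b111 = 7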
def a ( self , snake_case , snake_case ):
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't assign this task to anyone in the arrangement
snake_case_ = self.count_ways_until(__a , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
snake_case_ = total_ways_util
return self.dp[mask][task_no]
def a ( self , snake_case ):
for i in range(len(__a ) ):
for j in task_performed[i]:
self.task[j].append(__a )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
_UpperCAmelCase : List[Any] = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_UpperCAmelCase : int = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 285 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowercase : Any = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def __UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] )
__a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] )
else:
__a : int = np.asarray(__a )
__a : str = np.asarray(__a )
if ignore_case:
__a : Dict = np.char.lower(__a )
__a : List[str] = np.char.lower(__a )
if ignore_punctuation:
__a : Dict = string.punctuation.maketrans('' , '' , string.punctuation )
__a : Tuple = np.char.translate(__a , table=__a )
__a : Dict = np.char.translate(__a , table=__a )
if ignore_numbers:
__a : Optional[int] = string.digits.maketrans('' , '' , string.digits )
__a : Tuple = np.char.translate(__a , table=__a )
__a : Optional[int] = np.char.translate(__a , table=__a )
__a : Any = predictions == references
return {"exact_match": np.mean(__a ) * 100}
| 27 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_A : str = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_A : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 202 |
'''simple docstring'''
import os
import sys
__lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
__lowercase : int = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[str] , **_SCREAMING_SNAKE_CASE : List[Any] ):
return AutoConfig.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoTokenizer.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : Any ):
return AutoTokenizer.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModel.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Union[str, Any] ):
return AutoModel.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : List[Any] , **_SCREAMING_SNAKE_CASE : Optional[int] ):
return AutoModelForCausalLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[Any] ):
return AutoModelForMaskedLM.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Optional[Any] , **_SCREAMING_SNAKE_CASE : Any ):
return AutoModelForSequenceClassification.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def lowerCamelCase (*_SCREAMING_SNAKE_CASE : Any , **_SCREAMING_SNAKE_CASE : List[str] ):
return AutoModelForQuestionAnswering.from_pretrained(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 27 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class UpperCAmelCase ( lowerCAmelCase_ ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = '''distilbert'''
__UpperCamelCase : Any = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self : Optional[int] , lowerCAmelCase_ : Any=3_0_5_2_2 , lowerCAmelCase_ : Dict=5_1_2 , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : str=6 , lowerCAmelCase_ : Union[str, Any]=1_2 , lowerCAmelCase_ : Any=7_6_8 , lowerCAmelCase_ : Optional[int]=4 * 7_6_8 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : str=0.02 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : Any=0.2 , lowerCAmelCase_ : Optional[int]=0 , **lowerCAmelCase_ : Optional[int] , ):
"""simple docstring"""
_A: Any = vocab_size
_A: List[str] = max_position_embeddings
_A: Optional[Any] = sinusoidal_pos_embds
_A: int = n_layers
_A: Optional[Any] = n_heads
_A: Optional[int] = dim
_A: Optional[int] = hidden_dim
_A: Optional[Any] = dropout
_A: Tuple = attention_dropout
_A: Dict = activation
_A: List[str] = initializer_range
_A: Optional[Any] = qa_dropout
_A: Optional[int] = seq_classif_dropout
super().__init__(**__a , pad_token_id=__a )
class UpperCAmelCase ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
_A: Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_A: Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 121 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = inspect.getfile(accelerate.test_utils )
__a : List[str] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__a : Union[str, Any] = test_metrics
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
print(f"""Found {torch.cuda.device_count()} devices.""" )
__a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
| 27 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _lowercase :
lowercase = 4_2
lowercase = 4_2
class _lowercase :
def __init__( self : Optional[int] , snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : list[list[Edge]] = [[] for _ in range(__a )]
UpperCamelCase_ : int = size
def __getitem__( self : Dict , snake_case : Union[str, Any] ) -> Dict:
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return self._size
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(__a , __a ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : Any , snake_case : str ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = deque([start_vertex] )
UpperCamelCase_ : list[int | None] = [None] * self.size
UpperCamelCase_ : Optional[int] = 0
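# 0-1 BFS: weight-0 edges go to the front of the deque and weight-1 edges to the back,
# so vertices are popped in nondecreasing order of distance.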
while queue:
UpperCamelCase_ : Any = queue.popleft()
UpperCamelCase_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
UpperCamelCase_ : Optional[Any] = current_distance + edge.weight
UpperCamelCase_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(__a , __a )
and new_distance >= dest_vertex_distance
):
continue
UpperCamelCase_ : Dict = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict ):
__a : Optional[Any] = tmp_path / 'file.csv'
__a : Union[str, Any] = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : str = tmp_path / 'malformed_file.csv'
__a : int = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
__a : Optional[Any] = tmp_path / 'csv_with_image.csv'
__a : Dict = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : Union[str, Any] = tmp_path / 'csv_with_label.csv'
__a : Any = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
@pytest.fixture
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
__a : Dict = tmp_path / 'csv_with_int_list.csv'
__a : Tuple = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(_SCREAMING_SNAKE_CASE , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return str(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] ):
__a : int = Csv()
__a : str = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_SCREAMING_SNAKE_CASE , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(_SCREAMING_SNAKE_CASE ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
__a : Tuple = f.read().splitlines()[1]
__a : Tuple = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
__a : Any = csv._generate_tables([[csv_file_with_image]] )
__a : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
__a : Any = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f:
__a : Tuple = f.read().splitlines()[1:]
__a : Optional[int] = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
__a : List[str] = csv._generate_tables([[csv_file_with_label]] )
__a : Dict = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
__a : int = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(_SCREAMING_SNAKE_CASE ) for label in labels]
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
__a : str = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda _SCREAMING_SNAKE_CASE : [int(_SCREAMING_SNAKE_CASE ) for i in x.split()]} )
__a : Any = csv._generate_tables([[csv_file_with_int_list]] )
__a : Any = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
__a : Tuple = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 27 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase : Dict = 16
_lowercase : List[Any] = 32
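# Convert a byte count to whole mebibytes (2**20 bytes).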
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any:
return int(x / 2**20 )
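# Context manager that reports CUDA memory allocated between enter and exit (delta and peak, in MiB).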
class __magic_name__ :
def __enter__( self : Union[str, Any] ):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowercase_ : Optional[int] = torch.cuda.memory_allocated()
return self
def __exit__( self : List[Any] , *lowercase_ : int ):
gc.collect()
torch.cuda.empty_cache()
lowercase_ : Dict = torch.cuda.memory_allocated()
lowercase_ : List[Any] = torch.cuda.max_memory_allocated()
lowercase_ : Tuple = bamb(self.end - self.begin )
lowercase_ : Tuple = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCamelCase ( UpperCAmelCase__ : Accelerator , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : str = "bert-base-cased" , UpperCAmelCase__ : int = 320 , UpperCAmelCase__ : int = 160 , ) -> Union[str, Any]:
lowercase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = load_dataset(
"""glue""" , """mrpc""" , split={"""train""": F'''train[:{n_train}]''', """validation""": F'''validation[:{n_val}]'''} )
def tokenize_function(UpperCAmelCase__ : Tuple ):
# max_length=None => use the model max length (it's actually the default)
lowercase_ : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase_ : List[str] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase_ : Tuple = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCAmelCase__ : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowercase_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Dict:
# Initialize accelerator
lowercase_ : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase_ : Dict = config['lr']
lowercase_ : str = int(config["""num_epochs"""] )
lowercase_ : Optional[int] = int(config["""seed"""] )
lowercase_ : Any = int(config["""batch_size"""] )
lowercase_ : List[str] = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
lowercase_ : int = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
lowercase_ : Optional[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase_ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
lowercase_ : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowercase_ : Union[str, Any] = 1
lowercase_ : Tuple = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
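# i.e. the total number of optimizer update steps across all epochs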
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase_ : str = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
lowercase_ : List[Any] = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase_ : Optional[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
lowercase_ : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
lowercase_ : Dict = 0
# Now we train the model
lowercase_ : str = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase_ : List[Any] = model(**_SCREAMING_SNAKE_CASE )
lowercase_ : str = outputs.loss
lowercase_ : str = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) )
accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) )
accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) )
accelerator.print(
"""Total Peak Memory consumed during the train (max): {}""".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowercase_ : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : int = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=_SCREAMING_SNAKE_CASE , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_SCREAMING_SNAKE_CASE , )
parser.add_argument(
"""--output_dir""" , type=_SCREAMING_SNAKE_CASE , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--peak_memory_upper_bound""" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , )
parser.add_argument(
"""--n_train""" , type=_SCREAMING_SNAKE_CASE , default=320 , help="""Number of training examples to use.""" , )
parser.add_argument(
"""--n_val""" , type=_SCREAMING_SNAKE_CASE , default=160 , help="""Number of validation examples to use.""" , )
parser.add_argument(
"""--num_epochs""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of train epochs.""" , )
lowercase_ : List[str] = parser.parse_args()
lowercase_ : List[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 239 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Union[str, Any] = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCAmelCase__ ( *_UpperCamelCase : int ) -> Optional[int]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , 'r' ) as fh:
fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_EX )
try:
print(*_SCREAMING_SNAKE_CASE )
finally:
fcntl.flock(_SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
SCREAMING_SNAKE_CASE__ = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
SCREAMING_SNAKE_CASE__ = torch.device("cuda", local_rank)
SCREAMING_SNAKE_CASE__ = socket.gethostname()
SCREAMING_SNAKE_CASE__ = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
SCREAMING_SNAKE_CASE__ = dist.get_rank()
SCREAMING_SNAKE_CASE__ = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 150 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : int = batch_size
__a : Any = num_channels
__a : Optional[int] = image_size
__a : Dict = patch_size
__a : int = is_training
__a : Union[str, Any] = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Dict = use_labels
__a : str = vocab_size
__a : List[Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : str = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : Any = hidden_act
__a : List[str] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : List[Any] = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Any = type_sequence_label_size
__a : Optional[int] = initializer_range
__a : Any = coordinate_size
__a : List[Any] = shape_size
__a : Optional[int] = num_labels
__a : Dict = num_choices
__a : Union[str, Any] = scope
__a : Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__a : Optional[int] = text_seq_length
__a : Any = (image_size // patch_size) ** 2 + 1
__a : Dict = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__a : Any = bbox.numpy()
# Ensure that bbox is legal
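# (swap coordinates where needed so that x0 <= x1 and y0 <= y1)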
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a : List[Any] = bbox[i, j, 3]
__a : Tuple = bbox[i, j, 1]
__a : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__a : int = bbox[i, j, 2]
__a : Dict = bbox[i, j, 0]
__a : int = tmp_coordinate
__a : Optional[int] = tf.constant(__a )
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : str = None
if self.use_input_mask:
__a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__a : str = None
if self.use_token_type_ids:
__a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__a : Optional[Any] = None
__a : Optional[int] = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__a : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = TFLayoutLMvaModel(config=__a )
# text + image
__a : List[Any] = model(__a , pixel_values=__a , training=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , )
__a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__a : Any = model(__a , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__a : str = model({'pixel_values': pixel_values} , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = TFLayoutLMvaForSequenceClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : str = self.num_labels
__a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = 2
__a : Any = TFLayoutLMvaForQuestionAnswering(config=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.prepare_config_and_inputs()
((__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Dict = config_and_inputs
__a : Any = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
A_ = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
return True
def __UpperCAmelCase ( self , __a , __a , __a=False ):
'''simple docstring'''
__a : str = copy.deepcopy(__a )
if model_class in get_values(__a ):
__a : str = {
k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__a , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a ):
__a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = TFLayoutLMvaModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
if getattr(__a , 'hf_compute_loss' , __a ):
# The number of elements in the loss should be the same as the number of elements in the label
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0]
]
__a : Dict = added_label.shape.as_list()[:1]
# Test that model correctly computes the loss with kwargs
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : Dict = prepared_for_class.pop('input_ids' )
__a : Tuple = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly computes the loss when we mask some positions
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__a : Union[str, Any] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__a : List[Any] = -100
__a : List[str] = tf.convert_to_tensor(__a )
__a : Any = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = model(__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
# Get keys that were added with the _prepare_for_class function
__a : Dict = prepared_for_class.keys() - inputs_dict.keys()
__a : Any = inspect.signature(model.call ).parameters
__a : str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__a : List[Any] = {0: 'input_ids'}
for label_key in label_keys:
__a : List[Any] = signature_names.index(__a )
__a : Union[str, Any] = label_key
__a : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__a : Union[str, Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__a : Optional[Any] = prepared_for_class[value]
__a : str = tuple(__a )
# Send to model
__a : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 27 | 0 |
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow's C++ logging before it is imported below
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 287 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Turn `tests/models/xxx/test_modeling_xxx.py` into an importable module path."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f"""{test_file} instead.""")
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""")
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make nested mappings of test/model/tester classes JSON-friendly by using class names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
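# Minimal usage sketch for the helpers above (the test file path is only an example):
#
#     test_file = 'tests/models/bert/test_modeling_bert.py'
#     model_to_test = get_model_to_test_mapping(test_file)
#     model_to_tester = get_model_to_tester_mapping(test_file)
#     print(to_json(model_to_test))
#     print(to_json(model_to_tester))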
| 27 | 0 |
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to octal, e.g. decimal_to_octal(65) == '0o101'."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f'0o{int(octal)}'


def main():
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
    main()
 | 320 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np')
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
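# These integration tests only run when the slow suite is enabled, e.g.:
#
#     RUN_SLOW=1 pytest tests/pipelines/stable_diffusion_2/ -k inpaint
#
# (the exact test path is illustrative).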
| 27 | 0 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
    def _compute(
        self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 227 |
'''simple docstring'''
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 27 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = 'big_bird'

    def __init__(
        self,
        vocab_size=50_358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4_096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ])
 | 212 |
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path='sayef/fsner-bert-base-uncased'):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token as the start / end of an entity, given support examples."""
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        # Embed queries and supports with the shared encoder
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
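# Minimal usage sketch (tokenization details omitted; in the upstream fsner project a companion
# tokenizer utility builds the `W_query` / `W_supports` dicts, which carry 'sizes', 'start_token_id'
# and 'end_token_id' entries in addition to the usual BERT inputs):
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query, W_supports)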
| 27 | 0 |
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 188 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and return the sum as a reduced (numerator, denominator) pair."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 27 | 0 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 285 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    @property
    def gpu_provider(self):
        return (
            'CUDAExecutionProvider',
            {
                'gpu_mem_limit': '15000000000',  # 15GB
                'arena_extend_strategy': 'kSameAsRequested',
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type='np')
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 27 | 0 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect: [int], min: [int], max: [int]):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect: [int], overlap: int, image_size: [int]):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0))
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
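# Example of the helpers above: a 96x96 tile rect with a 16px border, clamped to a 256x256 image:
#
#     add_overlap_rect((0, 0, 96, 96), 16, (256, 256))  # -> (0, 0, 112, 112)
#     next_divisible(250, 8)                            # -> 248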
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, max_noise_level=max_noise_level)

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders), mode="L")
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask)
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,  # called after each tile with a dict holding "progress" and "image"
        callback_steps=1,  # assumed name: the original parameter name for this default was lost
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents)
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"""progress: {obj["progress"]:.4f}""")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
| 202 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : Dict = 16
__lowercase : List[Any] = 32
def bamb(x: int) -> int:
    # Converts bytes to megabytes
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = 'bert-base-cased',
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False)
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.')
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.')
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.')
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.')
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
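# Typical invocation (sketch; assumes an `accelerate` config with a DeepSpeed plugin has been set up,
# and the script filename below is illustrative):
#
#     accelerate launch peak_memory_usage.py --model_name_or_path bert-base-cased --num_epochs 1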
| 27 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = 'mobilenet_v2'

    def __init__(
        self,
        num_channels=3,
        image_size=2_2_4,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=3_2,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=2_5_5,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''')
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('''pixel_values''', {0: '''batch'''})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})])
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
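# Minimal usage sketch:
#
#     config = MobileNetV2Config()  # defaults mirror google/mobilenet_v2_1.0_224
#     onnx_config = MobileNetV2OnnxConfig(config, task='image-classification')
#     print(onnx_config.inputs, onnx_config.outputs, onnx_config.atol_for_validation)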
| 121 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        sas_model.load_state_dict(save_dict['model'])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wikiaab_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wikiaab_passages.num_rows, 128))
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset('eli5', name='LFQA_reddit')
    elia_train = elia['train_eli5']
    elia_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(elia_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)


wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ):
__a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = [elia_train[int(_SCREAMING_SNAKE_CASE )] for i in I[0]]
return nn_examples
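# Note (illustrative): faiss' `search(queries, k)` returns a pair (D, I) of
# (n_queries, k) arrays holding inner-product scores and row indices; the list
# comprehension above walks the first query's indices to collect the nearest
# ELI5 training examples.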
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ):
if source == "none":
__a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a : str = query_qa_dense_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a , __a : Union[str, Any] = query_es_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , )
__a : Dict = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None),
} )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ):
with torch.no_grad():
__a : Union[str, Any] = qa_sas_generate(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : Dict = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    __lowercase : List[Any] = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.\n    The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.\n    '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowercase : str = 'wiki40b'
__lowercase : List[Any] = 'dense'
__lowercase : Dict = 'beam'
__lowercase : Optional[int] = 2
__lowercase : List[str] = 64
__lowercase : Tuple = 2_56
__lowercase : List[str] = None
__lowercase : Tuple = None
__lowercase : List[Any] = st.sidebar.checkbox('Generation options')
if generate_options:
__lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
__lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
__lowercase : Tuple = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__lowercase : int = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase : Dict = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowercase : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowercase : List[str] = None
# start main text
__lowercase : int = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
__lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase : Any = st.text_input('Enter your question here:', '')
else:
__lowercase : Any = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10)
__lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
__lowercase : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase : str = support_list[:10]
__lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
__lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowercase , __lowercase : int = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
__lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
__lowercase : Any = res[1].strip()
if sec_titles == "":
__lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url)
else:
__lowercase : Union[str, Any] = sec_titles.split(' & ')
__lowercase : str = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase : str = find_nearest_training(question)
__lowercase : Optional[int] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
__lowercase : Any = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
__lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 27 | 0 |
import numpy as np
def __lowercase ( lowerCamelCase : np.array ):
return 1 / (1 + np.exp(-vector ))
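# A minimal sketch (not part of the original file): for x = [0.0, 2.0] the
# logistic curve gives 1 / (1 + exp(-x)) = [0.5, 0.88079708]. Because np.exp(-x)
# can overflow for large negative inputs, a numerically stable variant
# (an illustrative addition, name assumed) splits on the sign of the input:
def _stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    out = np.empty_like(vector, dtype=np.float64)
    positive = vector >= 0
    out[positive] = 1 / (1 + np.exp(-vector[positive]))
    exp_v = np.exp(vector[~positive])  # exponent is <= 0 here, so no overflow
    out[~positive] = exp_v / (1 + exp_v)
    return out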
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : List[Any] = torch.device('cpu')
def lowerCamelCase ():
__a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__a : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : int = dct.pop(_SCREAMING_SNAKE_CASE )
__a : Tuple = val
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : Dict = []
for k in state_dict.keys():
__a : List[Any] = k
if ".pwconv" in k:
__a : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
__a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
__a : Optional[int] = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
__a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
__a : Union[str, Any] = k_new.split('.' )
if ls[2].isdigit():
__a : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
__a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
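# Illustrative mapping (derived from the rules above): a checkpoint key such as
# "network.0.0.mlp.fc1.weight" is renamed to
# "swiftformer.encoder.network.0.blocks.0.mlp.fc1.weight".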
@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : Union[str, Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__a : List[str] = 1_000
__a : Tuple = 'huggingface/label-files'
__a : str = 'imagenet-1k-id2label.json'
__a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__a : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__a : Any = idalabel
__a : str = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__a : Dict = [3, 3, 6, 4]
__a : int = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__a : Dict = [3, 3, 9, 6]
__a : List[str] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__a : Dict = [4, 3, 10, 5]
__a : Optional[int] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__a : Tuple = [4, 4, 12, 6]
__a : Dict = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
__a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE )
else:
__a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
__a : Optional[Any] = checkpoint
__a : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
__a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# prepare test inputs
__a : Tuple = prepare_img()
__a : str = ViTImageProcessor.from_pretrained('preprocessor_config' )
__a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# compare outputs from both models
__a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE )
__a : Dict = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
__lowercase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 27 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowercase : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(
default='''cifar10''', metadata={'''help''': '''Name of a dataset from the datasets package'''})
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''})
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={'''help''': '''The column name of the images in the files.'''})
UpperCamelCase__ = field(default=lowerCAmelCase_, metadata={'''help''': '''A folder containing the training data.'''})
UpperCamelCase__ = field(default=lowerCAmelCase_, metadata={'''help''': '''A folder containing the validation data.'''})
UpperCamelCase__ = field(
default=0.15, metadata={'''help''': '''Percent to split off of train for validation.'''})
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
}, )
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
}, )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[Any] = {}
if self.train_dir is not None:
lowercase_ : List[str] = self.train_dir
if self.validation_dir is not None:
lowercase_ : Union[str, Any] = self.validation_dir
lowercase_ : Union[str, Any] = data_files if data_files else None
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
}, )
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''})
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
}, )
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''})
UpperCamelCase__ = field(
default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
UpperCamelCase__ = field(default=lowerCAmelCase_, metadata={'''help''': '''Name or path of preprocessor config.'''})
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
}, )
UpperCamelCase__ = field(
default=0.75, metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''})
UpperCamelCase__ = field(
default=lowerCAmelCase_, metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''})
@dataclass
class __magic_name__ ( lowerCAmelCase_):
UpperCamelCase__ = field(
default=1e-3, metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''})
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any:
lowercase_ : Union[str, Any] = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCamelCase ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        lowercase_ , lowercase_ , lowercase_ : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase_ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
lowercase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase_ : str = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
lowercase_ : List[str] = ds['train'].train_test_split(data_args.train_val_split )
lowercase_ : str = split['train']
lowercase_ : List[Any] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ : int = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase_ : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.config_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase_ : Optional[int] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
lowercase_ : str = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowercase_ : Dict = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase_ : Optional[int] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
lowercase_ : Tuple = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowercase_ : Optional[int] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowercase_ : Dict = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
if training_args.do_train:
lowercase_ : str = ds['train'].column_names
else:
lowercase_ : str = ds['validation'].column_names
if data_args.image_column_name is not None:
lowercase_ : List[str] = data_args.image_column_name
elif "image" in column_names:
lowercase_ : Optional[Any] = 'image'
elif "img" in column_names:
lowercase_ : int = 'img'
else:
lowercase_ : List[Any] = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase_ : Tuple = image_processor.size['shortest_edge']
else:
lowercase_ : Union[str, Any] = (image_processor.size['height'], image_processor.size['width'])
lowercase_ : Dict = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(UpperCAmelCase__ : List[Any] ):
lowercase_ : Optional[int] = [transforms(_SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
lowercase_ : Any = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
lowercase_ : Tuple = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Compute absolute learning rate
lowercase_ : int = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowercase_ : Tuple = training_args.base_learning_rate * total_train_batch_size / 256
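    # Worked example (illustrative): with base_learning_rate=1e-3, a per-device
    # batch size of 32, gradient_accumulation_steps=2 and a world size of 4,
    # total_train_batch_size = 32 * 2 * 4 = 256, so the absolute learning rate
    # stays at 1e-3 * 256 / 256 = 1e-3; doubling the batch doubles the rate.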
# Initialize our trainer
lowercase_ : Dict = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowercase_ : Union[str, Any] = None
if training_args.resume_from_checkpoint is not None:
lowercase_ : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase_ : int = last_checkpoint
lowercase_ : Optional[int] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase_ : Dict = trainer.evaluate()
trainer.log_metrics("""eval""" , _SCREAMING_SNAKE_CASE )
trainer.save_metrics("""eval""" , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
lowercase_ : List[str] = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 239 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "umt5"
A_ = ["past_key_values"]
def __init__( self , __a=25_0112 , __a=512 , __a=64 , __a=1024 , __a=8 , __a=None , __a=6 , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=1.0 , __a="gated-gelu" , __a=True , __a=True , __a="T5Tokenizer" , __a=True , __a=0 , __a=1 , __a=0 , **__a , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
__a : Any = vocab_size
__a : Any = d_model
__a : str = d_kv
__a : Dict = d_ff
__a : Union[str, Any] = num_layers
__a : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a : Optional[int] = num_heads
__a : Tuple = relative_attention_num_buckets
__a : Optional[Any] = relative_attention_max_distance
__a : Optional[int] = dropout_rate
__a : List[Any] = layer_norm_epsilon
__a : int = initializer_factor
__a : Union[str, Any] = feed_forward_proj
__a : Any = use_cache
__a : List[Any] = self.feed_forward_proj.split('-' )
__a : Dict = act_info[-1]
__a : Dict = act_info[0] == 'gated'
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__a : Optional[int] = 'gelu_new'
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.d_model
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_heads
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_layers
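# Usage sketch (illustrative; the class name is scrambled in this dump): the
# defaults roughly correspond to the small UMT5 checkpoint, and the three
# properties above alias d_model, num_heads and num_layers for callers that
# expect the generic hidden_size / num_attention_heads / num_hidden_layers names.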
class __UpperCamelCase ( lowerCAmelCase_ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__a : Dict = 'past_encoder_sequence + sequence'
__a : Tuple = {0: 'batch'}
__a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
__a : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 13
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 5E-4
| 27 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
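# Note (illustrative): each placeholder class below raises a descriptive error
# through `requires_backends` the moment it is instantiated or loaded, so the
# package can be imported even when torch, transformers or onnx is missing.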
class lowerCAmelCase_ ( metaclass=lowerCAmelCase_ ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase_ ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase_ ):
"""simple docstring"""
_lowerCAmelCase : List[str] = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase_ ):
"""simple docstring"""
_lowerCAmelCase : List[str] = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase_ ):
"""simple docstring"""
_lowerCAmelCase : str = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase_ ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""torch""", """transformers""", """onnx"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def snake_case ( cls , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 150 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus" ):
__a : List[Any] = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
__a : Union[str, Any] = soup.findAll('h1' )
__a : int = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
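# Illustrative output shape (values change daily): the zip pairs each heading
# with its counter, e.g. {'Coronavirus Cases:': '...', 'Deaths:': '...', 'Recovered:': '...'}.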
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 27 | 0 |
def _a ( lowerCamelCase, lowerCamelCase ):
while b:
        lowerCamelCase , lowerCamelCase : Any = b, a % b
return a
def _a ( lowerCamelCase, lowerCamelCase ):
return a if b == 0 else euclidean_gcd_recursive(_SCREAMING_SNAKE_CASE, a % b )
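# Worked example (illustrative): gcd(48, 18) iterates
# (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6; the recursive
# variant above unwinds through the same pairs.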
def _a ( ):
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}''' )
if __name__ == "__main__":
main()
| 287 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , 'embed_dim' ) )
self.parent.assertTrue(hasattr(__a , 'num_heads' ) )
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : Optional[int] = image_size
__a : List[str] = patch_sizes
__a : str = patch_stride
__a : Any = patch_padding
__a : Dict = is_training
__a : Union[str, Any] = use_labels
__a : Dict = num_labels
__a : List[Any] = num_channels
__a : Any = embed_dim
__a : int = num_heads
__a : Optional[int] = stride_kv
__a : Dict = depth
__a : List[str] = cls_token
__a : List[Any] = attention_drop_rate
__a : Tuple = initializer_range
__a : int = layer_norm_eps
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
# create a random int32 tensor of given shape
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : str = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = TFCvtModel(config=__a )
__a : Dict = model(__a , training=__a )
__a : Any = (self.image_size, self.image_size)
__a , __a : Dict = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
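        # Worked check (illustrative): with image_size=64 the per-stage formula gives
        # floor((64 + 2*2 - 7)/4 + 1) = 16, floor((16 + 2*1 - 3)/2 + 1) = 8 and
        # floor((8 + 2*1 - 3)/2 + 1) = 4, so the last hidden state is (batch, 96, 4, 4).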
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = self.num_labels
__a : Optional[int] = TFCvtForImageClassification(__a )
__a : Dict = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A_ = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtModelTester(self )
__a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(__a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : List[str] = model_class(__a )
__a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__a : Any = outputs.hidden_states
__a : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[Any] = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = TFCvtModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a : Tuple = self.default_image_processor
__a : Any = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' )
# forward pass
__a : Any = model(**__a )
# verify the logits
__a : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
| 27 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__snake_case = 'base_with_context'
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict ):
"""simple docstring"""
_a = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
_a = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
_a = weights[f'layers_{lyr_num}']
_a = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
_a = ly_weight['attention']
_a = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
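# Note (illustrative): T5X/Flax stores dense kernels as (in_features, out_features)
# while torch.nn.Linear keeps (out_features, in_features), which is why every
# kernel above and below is transposed with `.T` before wrapping in nn.Parameter.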
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_a = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
_a = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
_a = weights[f'layers_{lyr_num}']
_a = ly_weight['attention']
_a = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_a = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_a = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
_a = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=_SCREAMING_SNAKE_CASE )
_a = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_a = weights[f'layers_{lyr_num}']
_a = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
_a = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
_a = ly_weight['self_attention']
_a = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_a = ly_weight['MultiHeadDotProductAttention_0']
_a = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_a = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_a = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_a = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
_a = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
_a = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_a = jnp.tree_util.tree_map(onp.array, _SCREAMING_SNAKE_CASE )
_a = [
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
_a = os.path.join(args.checkpoint_path, '''..''', '''config.gin''' )
_a = inference.parse_training_gin_file(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
_a = inference.InferenceModel(args.checkpoint_path, _SCREAMING_SNAKE_CASE )
_a = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''', variance_type='''fixed_large''' )
_a = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', )
_a = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['''targets_context'''], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', )
_a = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['''targets_context'''], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
_a = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''], _SCREAMING_SNAKE_CASE )
_a = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''], _SCREAMING_SNAKE_CASE )
_a = load_decoder(ta_checkpoint['''target''']['''decoder'''], _SCREAMING_SNAKE_CASE )
_a = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
_a = SpectrogramDiffusionPipeline(
notes_encoder=_SCREAMING_SNAKE_CASE, continuous_encoder=_SCREAMING_SNAKE_CASE, decoder=_SCREAMING_SNAKE_CASE, scheduler=_SCREAMING_SNAKE_CASE, melgan=_SCREAMING_SNAKE_CASE, )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
__snake_case = parser.parse_args()
    main(args)
| 320 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif name.split('.' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path , 'r' , encoding='utf-8' ) as f:
        lines = f.readlines()
        words = [line.split(' ' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
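# Sketch of the resulting mapping (the dict file contents are hypothetical): a
# fairseq dict.txt containing the lines "hello 42" and "world 7" would yield
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'hello': 4, 'world': 5}.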
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('embed_out' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) , 'w' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 27 | 0 |
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    """Validate that a device_map covers every attention block exactly once."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks) )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Return a dictionary distributing n_layers evenly across the given devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
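# Illustrative usage (hypothetical 12-layer model sharded over 4 devices):
# >>> get_device_map(12, [0, 1, 2, 3])
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}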
| 227 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*msgs )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
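# Example: calling printflock(f"{gpu} is OK") from every rank emits whole lines
# without interleaving, because each print runs under an exclusive flock on
# this script file.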
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 27 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization( self ):
        '''simple docstring'''
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text ) | 212 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
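# Note: with _LazyModule, importing this package stays cheap; the submodules
# registered in _import_structure above are only imported on first attribute
# access (for example, touching DataaVecAudioModel triggers the real import
# of the modeling module).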
| 27 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a nested list of random floats with the given 2D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1E-10, return_attention_mask=True, ) -> Optional[int]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ) -> Tuple:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self, equal_length=False, numpify=False ) -> Dict:
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self, equal_length=False, numpify=False ) -> Dict:
        """simple docstring"""
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp( self ) -> Optional[Any]:
        """simple docstring"""
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self, input_vector ) -> Union[str, Any]:
        """simple docstring"""
        self.assertTrue(np.all(np.mean(input_vector, axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0 ) - 1 ) < 1E-3 ) )
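    # The helper above encodes the standard per-utterance normalization
    #     x_norm = (x - mean(x)) / sqrt(var(x) + eps)
    # so normalized inputs should have per-dimension mean ~0 and variance ~1
    # within the 1e-3 tolerance used here.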
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =[np.asarray(__a ) for speech_input in speech_inputs]
# Test not batched input
a__ =feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values
a__ =feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
# Test batched
a__ =feat_extract(__a, return_tensors='''np''' ).input_values
a__ =feat_extract(__a, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__a, __a ):
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =['longest', 'max_length', 'do_not_pad']
a__ =[None, 1600, None]
for max_length, padding in zip(__a, __a ):
a__ =feat_extract(__a, padding=__a, max_length=__a, return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =range(800, 1400, 200 )
a__ =[floats_list((1, x) )[0] for x in lengths]
a__ =['longest', 'max_length', 'do_not_pad']
a__ =[None, 1600, None]
for max_length, padding in zip(__a, __a ):
a__ =feat_extract(__a, max_length=__a, padding=__a )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =feat_extract(
__a, truncation=__a, max_length=1000, padding='''max_length''', return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =feat_extract(
__a, truncation=__a, max_length=1000, padding='''longest''', return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =feat_extract(
__a, truncation=__a, max_length=2000, padding='''longest''', return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =np.random.rand(100 ).astype(np.floataa )
a__ =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__ =feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a__ =feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =[np.asarray(__a ) for speech_input in speech_inputs]
# Test feature size
a__ =feature_extractor(audio_target=__a, padding=__a, return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a__ =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_values
a__ =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
# Test batched
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__a, __a ):
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a__ =[floats_list((1, x) )[0] for x in (800, 800, 800)]
a__ =np.asarray(__a )
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__a, __a ):
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =self.feature_extraction_class(**self.feat_extract_dict )
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__a ) == len(__a ) for x, y in zip(__a, processed_features[input_name] ) ) )
a__ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
a__ =BatchFeature({input_name: speech_inputs}, tensor_type='''np''' )
a__ =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
a__ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
a__ =self.feature_extraction_class(**self.feat_extract_dict )
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs}, tensor_type='''pt''' )
a__ =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_dict )
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
a__ =feat_extract.num_mel_bins # hack!
a__ =feat_extract.pad(__a, padding='''longest''', return_tensors='''np''' )[input_name]
a__ =feat_extract.pad(__a, padding='''longest''', return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ =self.feat_extract_dict
a__ =True
a__ =self.feature_extraction_class(**__a )
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =[len(__a ) for x in speech_inputs]
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
a__ =feat_extract.num_mel_bins # hack!
a__ =feat_extract.pad(__a, padding='''longest''', return_tensors='''np''' )
self.assertIn('''attention_mask''', __a )
self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), __a )
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
a__ =self.feat_extract_dict
a__ =True
a__ =self.feature_extraction_class(**__a )
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =[len(__a ) for x in speech_inputs]
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
a__ =min(__a )
a__ =feat_extract.num_mel_bins # hack!
a__ =feat_extract.pad(
__a, padding='''max_length''', max_length=__a, truncation=__a, return_tensors='''np''' )
self.assertIn('''attention_mask''', __a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] )
    def _load_datasamples( self, num_samples ) -> Union[str, Any]:
        """simple docstring"""
        from datasets import load_dataset
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ =torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
a__ =self._load_datasamples(1 )
a__ =SpeechTaFeatureExtractor()
a__ =feature_extractor(__a, return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape, (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30], __a, atol=1E-6 ) )
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
a__ =torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
a__ =self._load_datasamples(1 )
a__ =SpeechTaFeatureExtractor()
a__ =feature_extractor(audio_target=__a, return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape, (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30], __a, atol=1E-4 ) )
| 188 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"' )
        else:
            test_case(self , metric_name )
    return wrapper


def skip_if_metric_requires_transformers(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"' )
        else:
            test_case(self , metric_name )
    return wrapper


def skip_on_windows(test_case ):
    @wraps(test_case )
    def wrapper(self , metric_name ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"' )
        else:
            test_case(self , metric_name )
    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest(parameterized.TestCase ):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
    def test_load_metric( self , metric_name ):
        '''simple docstring'''
        doctest_case = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@slow
    def test_load_real_metric( self , metric_name ):
        '''simple docstring'''
        doctest_case = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@contextmanager
    def patch_intensive_calls( self , metric_name , metric_module_name ):
        '''simple docstring'''
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](metric_module_name ):
                yield
        else:
            yield
@contextmanager
    def use_local_metrics( self ):
        '''simple docstring'''
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join('metrics' , metric_name ) , *args , **kwargs )
        with patch('datasets.load_metric' ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
@classmethod
    def register_intensive_calls_patcher( cls , metric_name ):
        '''simple docstring'''
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def patch_bleurt(module_name ):
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string('sv' , '' , '' )  # handle pytest cli flags
    class MockedPredictor(Predictor ):
        def predict( self , input_dict ):
            '''simple docstring'''
            assert len(input_dict['input_ids'] ) == 2
            return np.array([1.03, 1.04] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def patch_bertscore(module_name ):
    import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model' ), patch(
        'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def patch_comet(module_name ):
    def load_from_checkpoint(model_path ):
        class Model:
            def predict( self , data , *args , **kwargs ):
                '''simple docstring'''
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
        return Model()
    # mock download_model and load_from_checkpoint, which would otherwise
    # download and load a real comet model
    with patch('comet.download_model' ) as mock_download_model:
        mock_download_model.return_value = None
        with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics' , 'seqeval' ) )
    wrong_scheme = 'ERROR'
    error_message = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 27 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    '''simple docstring'''
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjacency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost] )
        adjacency[nodeb].append([nodea, cost] )
    result = mst(adjacency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
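# For reference, a minimal heap-based Prim's sketch over the same adjacency
# format (lists of [neighbor, cost] per node). This is an illustrative
# re-implementation, not the `prisms_algorithm` under test.
import heapq


def prim_sketch(adjacency: dict, start: int = 0) -> list:
    visited = {start}
    heap = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue  # skip edges into already-connected nodes
        visited.add(to)
        mst_edges.append((frm, to))
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (nxt_cost, to, nxt))
    return mst_edges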
| 285 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric ):
    def _info( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '' , x ) for x in predictions] )
                references = np.array([re.sub(s , '' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('' , '' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('' , '' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 27 | 0 |
"""simple docstring"""
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: explore right/diagonal/down sub-squares from every cell."""
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with a dp_array of cached sub-square sizes."""
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows+1) x (cols+1) table, scanned bottom-right to top-left."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same DP with O(cols) memory: only the current and the next row are kept."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias, so later writes to current_row don't leak into next_row
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
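# Worked example (hypothetical 3x3 input):
#   mat = [[1, 1, 0],
#          [1, 1, 0],
#          [0, 0, 0]]
# every variant above returns 2, the side length of the largest all-ones
# square (despite the "area" in the function names).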
| 202 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
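# Illustrative torch.hub-style usage (checkpoint name is just an example):
#   tok = tokenizer('bert-base-uncased')
#   mdl = model('bert-base-uncased')
# mirroring torch.hub.load('huggingface/transformers', 'model', 'bert-base-uncased').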
| 27 | 0 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''WhisperFeatureExtractor'''
    tokenizer_class = '''WhisperTokenizer'''

    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )

    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    def get_prompt_ids( self , text , return_tensors="np" ):
        """simple docstring"""
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
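# Hypothetical usage sketch (model id and waveform are illustrative):
#   processor = WhisperProcessor.from_pretrained('openai/whisper-tiny')
#   batch = processor(audio=waveform, sampling_rate=16_000, text='a transcript')
#   # batch holds the feature extractor's inputs plus tokenized 'labels'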
| 121 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop( self ):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main , num_processes=1 )
    @require_cpu
    def test_metric_cpu_multi( self ):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def test_metric_gpu( self ):
        '''simple docstring'''
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi( self ):
        '''simple docstring'''
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 27 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Tuple = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
"""simple docstring"""
with self.assertRaises(__a ):
UpperCamelCase_ : Any = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
"""simple docstring"""
with self.assertRaises(__a ):
UpperCamelCase_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value('bool' ) , type=Value('int64' ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = pa.array(TypedSequence([1, 2, 3] , type=Value('int32' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCamelCase_ : Union[str, Any] = pa.array(TypedSequence(['foo', 'bar'] , type=Value('int64' ) ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value('int32' ) ) )
self.assertEqual(arr.type , pa.intaa() )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
"""simple docstring"""
UpperCamelCase_ : List[str] = pa.array(TypedSequence(['foo', 'bar'] , try_type=Value('int64' ) ) )
self.assertEqual(arr.type , pa.string() )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , 'int64' ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCamelCase_ : Any = pa.array(TypedSequence(['foo', 'bar'] , type=ArrayaD((1, 3) , 'int64' ) ) )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
"""simple docstring"""
UpperCamelCase_ : str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , 'int64' ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = pa.array(TypedSequence(['foo', 'bar'] , try_type=ArrayaD((1, 3) , 'int64' ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
import PIL.Image
UpperCamelCase_ : Union[str, Any] = PIL.Image.fromarray(np.arange(1_0 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
'datasets.arrow_writer.cast_to_python_objects' , side_effect=__a ) as mock_cast_to_python_objects:
UpperCamelCase_ : Union[str, Any] = pa.array(TypedSequence([{'path': None, 'bytes': B'image_bytes'}, pil_image] , type=Image() ) )
UpperCamelCase_ : Optional[Any] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('optimize_list_casting' , __a )
self.assertFalse(kwargs['optimize_list_casting'] )
def __lowercase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : int ):
UpperCamelCase_ : str = pa.BufferReader(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , pa.Buffer ) else pa.memory_map(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : Dict = pa.ipc.open_stream(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
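# Quick reference for the TypedSequence semantics exercised above (sketch, not part
# of the original test file):
#   pa.array(TypedSequence([1, 2, 3]))                      -> int64 (inferred)
#   pa.array(TypedSequence([1, 2, 3], type=Value("int32"))) -> int32 (enforced; raises if incompatible)
#   pa.array(TypedSequence(["a"], try_type=Value("int64"))) -> string (best effort; silent fallback)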
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
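# The schema/metadata assertions above work because `datasets` serializes the
# `Features` object into the Arrow schema metadata (under a "huggingface" key),
# so feature types, including ClassLabel names, survive a write/read round trip.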
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            # non-hashable key types such as lists are rejected
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
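# Keys are hashed with the writer's `hash_salt` (the split name here): a
# non-hashable key raises InvalidKeyError, a repeated key hash raises
# DuplicatedKeysError, and distinct keys write normally, as the three tests above check.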
@pytest.mark.parametrize('writer_batch_size' , [None, 1, 10] )
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}] )
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
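# ArrowWriter can target either an in-memory stream (pa.BufferOutputStream, as in
# most tests here) or a filesystem path; with a path it opens the file itself, so
# `_check_output` memory-maps it instead of wrapping a buffer.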
def get_base_dtype(arr_type):
    # peel nested list types down to the primitive value type
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
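# Example (illustrative): for a nested list type such as list<list<int32>>,
# get_base_dtype recurses through value_type twice and returns pa.int32(), which
# is what lets the parametrized sequences [[...]] and [[[...]]] share assertions.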
@pytest.mark.parametrize(
'col, expected_dtype' , [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize('raise_exception' , [False, True] )
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
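# Unlike the Arrow IPC stream used in the earlier tests, the Parquet output is read
# back with pq.read_table, but the resulting table contents are expected to be identical.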
@require_pil
@pytest.mark.parametrize('embed_local_files' , [False, True] )
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    buf = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(buf)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
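# Design note: ArrowWriter normalizes every field to nullable when building its
# writer, which is why the non-nullable input schema above compares equal to a
# plain nullable one.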
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        F"""\
        image
        {image_file}
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """ )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
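# The `converters` mapping is forwarded to pandas.read_csv, so "1 2 3" is parsed
# into [1, 2, 3] before the table is converted to Arrow, giving a list<int> column.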
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
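# Hypothetical invocation (script name and paths are illustrative, not from the
# original file; the argument names match the parser below):
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=2.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-qa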
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_lowercase : List[Any] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
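# The module replaces itself in sys.modules with a _LazyModule, so heavy backends
# (torch/TF/Flax) are only imported when one of the names in _import_structure is
# first accessed; the TYPE_CHECKING branch above exists purely for static analysis.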
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=__a )
model.to(__a )
model.eval()
snake_case = model(__a , attention_mask=__a )
snake_case = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
"""simple docstring"""
snake_case = BioGptForCausalLM(config=__a )
model.to(__a )
model.eval()
snake_case = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=__a )
model.to(__a )
model.eval()
# create attention mask
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=__a )
snake_case = self.seq_length // 2
snake_case = 0
# first forward pass
snake_case = model(__a , attention_mask=__a ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case = ids_tensor((1,) , __a ).item() + 1
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__a )] , dim=1 , )
# get two different outputs
snake_case = model(__a , attention_mask=__a )['last_hidden_state']
snake_case = model(__a , past_key_values=__a , attention_mask=__a )['last_hidden_state']
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=__a ).to(__a ).eval()
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=__a )
# first forward pass
snake_case = model(__a , attention_mask=__a , use_cache=__a )
snake_case = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case = model(__a , attention_mask=__a )['last_hidden_state']
snake_case = model(__a , attention_mask=__a , past_key_values=__a )[
'last_hidden_state'
]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
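        # The two allclose checks above verify cache correctness: feeding the full
        # sequence at once and feeding only the continuation with `past_key_values`
        # must produce the same hidden states on the overlapping positions.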
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
snake_case = BioGptForCausalLM(__a )
model.to(__a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case = model(__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(__a )
snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = BioGptForTokenClassification(__a )
model.to(__a )
model.eval()
snake_case = model(__a , attention_mask=__a , token_type_ids=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Any = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[str] = False
def snake_case ( self ):
"""simple docstring"""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def snake_case ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def snake_case ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
    def snake_case ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
    def snake_case ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
    def snake_case ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
    def snake_case ( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def snake_case ( self ):
"""simple docstring"""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), )
        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
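        # Left padding is required here because BioGPT is decoder-only: with right
        # padding the generated continuation would follow pad tokens, so padded and
        # non-padded prompts could not be compared token for token.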
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = BioGptModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(__a )
snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case = BioGptForSequenceClassification(__a )
model.to(__a )
model.eval()
snake_case = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = 'multi_label_classification'
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(__a )
snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case = BioGptForSequenceClassification(__a )
model.to(__a )
model.eval()
snake_case = model(__a , attention_mask=__a , labels=__a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
snake_case = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
snake_case = model(__a )[0]
snake_case = 4_23_84
snake_case = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __a )
snake_case = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__a )
torch.manual_seed(0 )
snake_case = tokenizer('COVID-19 is' , return_tensors='pt' ).to(__a )
snake_case = model.generate(
**__a , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__a , )
snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=__a )
snake_case = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(__a , __a )
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : int = batch_size
__a : Any = num_channels
__a : Optional[int] = image_size
__a : Dict = patch_size
__a : int = is_training
__a : Union[str, Any] = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Dict = use_labels
__a : str = vocab_size
__a : List[Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : str = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : Any = hidden_act
__a : List[str] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : List[Any] = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Any = type_sequence_label_size
__a : Optional[int] = initializer_range
__a : Any = coordinate_size
__a : List[Any] = shape_size
__a : Optional[int] = num_labels
__a : Dict = num_choices
__a : Union[str, Any] = scope
__a : Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__a : Optional[int] = text_seq_length
__a : Any = (image_size // patch_size) ** 2 + 1
__a : Dict = self.text_seq_length + self.image_seq_length
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__a : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__a : Any = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__a : List[Any] = bbox[i, j, 3]
__a : Tuple = bbox[i, j, 1]
__a : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__a : int = bbox[i, j, 2]
__a : Dict = bbox[i, j, 0]
__a : int = tmp_coordinate
__a : Optional[int] = tf.constant(__a )
__a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : str = None
if self.use_input_mask:
__a : Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
__a : str = None
if self.use_token_type_ids:
__a : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__a : Optional[Any] = None
__a : Optional[int] = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__a : int = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = TFLayoutLMvaModel(config=__a )
# text + image
__a : List[Any] = model(__a , pixel_values=__a , training=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , training=__a , )
__a : Optional[int] = model(__a , bbox=__a , pixel_values=__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__a : Any = model(__a , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__a : str = model({'pixel_values': pixel_values} , training=__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Any = self.num_labels
__a : Dict = TFLayoutLMvaForSequenceClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : str = self.num_labels
__a : Optional[Any] = TFLayoutLMvaForTokenClassification(config=__a )
__a : List[str] = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , training=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = 2
__a : Any = TFLayoutLMvaForQuestionAnswering(config=__a )
__a : Any = model(
__a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , training=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
A_ = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a ):
'''simple docstring'''
return True
def __UpperCAmelCase ( self , __a , __a , __a=False ):
'''simple docstring'''
__a : str = copy.deepcopy(__a )
if model_class in get_values(__a ):
__a : str = {
k: tf.tile(tf.expand_dims(__a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__a , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__a ):
__a : Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__a : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(__a ):
__a : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
if getattr(__a , 'hf_compute_loss' , __a ):
# The number of elements in the loss should be the same as the number of elements in the label
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__a )[0]
]
__a : Dict = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : Dict = prepared_for_class.pop('input_ids' )
__a : Tuple = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__a : int = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__a : Union[str, Any] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__a : List[Any] = -100
__a : List[str] = tf.convert_to_tensor(__a )
__a : Any = model(__a , **__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__a : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
__a : str = model(__a )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__a : Tuple = self._prepare_for_class(inputs_dict.copy() , __a , return_labels=__a )
# Get keys that were added with the _prepare_for_class function
__a : Dict = prepared_for_class.keys() - inputs_dict.keys()
__a : Any = inspect.signature(model.call ).parameters
__a : str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__a : List[Any] = {0: 'input_ids'}
for label_key in label_keys:
__a : List[Any] = signature_names.index(__a )
__a : Union[str, Any] = label_key
__a : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__a : Union[str, Any] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__a : Optional[Any] = prepared_for_class[value]
__a : str = tuple(__a )
# Send to model
__a : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
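        # The same loss path is exercised several ways above (keyword args, labels
        # masked with -100, a single dict, and a positional tuple) to ensure TF's
        # different call signatures stay in sync.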
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : List[Any] = TFLayoutLMvaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__a : Tuple = self.default_image_processor
__a : List[Any] = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' ).pixel_values
__a : Union[str, Any] = tf.constant([[1, 2]] )
__a : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__a : Tuple = model(input_ids=__a , bbox=__a , pixel_values=__a , training=__a )
# verify the logits
__a : List[Any] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , __a )
__a : Optional[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class A__ :
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
def UpperCamelCase__ ( self ):
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Tuple = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCamelCase__ ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Optional[Any] = TFTransfoXLModel(__a )
lowerCamelCase : Optional[Any] = model(__a ).to_tuple()
lowerCamelCase : Union[str, Any] = {'input_ids': input_ids_a, 'mems': mems_a}
lowerCamelCase : Optional[Any] = model(__a ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
lowerCamelCase : Tuple = TFTransfoXLLMHeadModel(__a )
lowerCamelCase : Optional[int] = model(__a ).to_tuple()
lowerCamelCase : Any = {'input_ids': input_ids_a, 'labels': lm_labels}
lowerCamelCase : Any = model(__a ).to_tuple()
lowerCamelCase : List[Any] = model([input_ids_a, mems_a] ).to_tuple()
lowerCamelCase : Dict = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
lowerCamelCase : Tuple = model(__a ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_a, input_ids_b, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_a)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_a, input_ids_b, lm_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids_a}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.int32 )  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
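# Hedged usage sketch (not part of the test above): Transfo-XL threads a recurrence
# memory ("mems") across segments, so long inputs can be consumed chunk by chunk.
# `segments` below is a hypothetical list of input-id tensors:
#
#   mems = None
#   for segment in segments:
#       outputs = model(input_ids=segment, mems=mems)
#       mems = outputs.mems  # reuse cached hidden states for the next segment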
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f"""{test_file} instead.""")
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""")
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path


def get_test_module(test_file):
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
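# Illustration (hypothetical classes): `to_json` recursively replaces classes with their
# names so the mappings above can be serialized, e.g.
#   to_json({BertModelTest: [BertModelTester]})  ->  {'BertModelTest': ['BertModelTester']}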
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = 'EncodecFeatureExtractor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['input_values'] = audio_inputs['input_values']
            if "padding_mask" in audio_inputs:
                inputs['padding_mask'] = audio_inputs['padding_mask']
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop('audio', None)
        padding_mask = kwargs.pop('padding_mask', None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[padding_mask[i][None, :] != self.feature_extractor.padding_value]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
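# Simplified sketch of the padding-mask trimming performed by `_decode_audio`, with
# made-up shapes rather than real model output (assumes padding_value == 0):
#
#   import numpy as np
#   audio = np.ones((2, 1, 6))                # (batch, channels, seq_len)
#   mask = np.array([[1, 1, 1, 1, 0, 0],
#                    [1, 1, 1, 1, 1, 1]])     # 0 marks padded positions
#   trimmed = [a[:, m.astype(bool)] for a, m in zip(audio, mask)]
#   # trimmed[0].shape == (1, 4), trimmed[1].shape == (1, 6)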
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
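# Note on the assertions above: diffusers fast tests compare only a deterministic 3x3x1
# corner slice of the generated image against stored reference values; this keeps the
# regression check cheap while still catching numerical drift in the pipeline.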
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
_lowercase: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
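# Example invocation (hypothetical paths):
#   python convert_mbart_original_checkpoint_to_pytorch.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned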
'''simple docstring'''
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
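# Note: `params=locals()` serializes the function arguments directly into the query string,
# so current_weather('Paris') issues GET .../weather?q=Paris&appid=<APPID>.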
if __name__ == "__main__":
from pprint import pprint
while True:
__lowercase : Dict = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend('spark'):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend('spark'):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path='sayef/fsner-bert-base-uncased'):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
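# Shape sketch for the scoring loop above (sizes are assumptions, not taken from the model):
#   q[i]    : (query_len, hidden)       token embeddings of one query sentence
#   s_start : (n_start_tokens, hidden)  support tokens marked as entity starts
#   torch.matmul(q[i], s_start.T).sum(1) -> (query_len,) similarity mass per query token
#   .softmax(0)                          -> probability of each query token being a start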
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
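# With this lazy-module pattern, `import transformers.models.wavlm` stays cheap: the heavy
# torch-backed symbols are only imported when an attribute such as `WavLMModel` is first
# accessed through the `_LazyModule` proxy.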
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add x/x_den + y/y_den + z/z_den as fractions and reduce to lowest terms."""
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
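# Worked example: add_three(1, 2, 1, 3, 1, 6) computes 1/2 + 1/3 + 1/6 = 1/1 -> (1, 1)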
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCAmelCase : str = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_UpperCAmelCase : List[Any] = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    '''simple docstring'''
    sd = torch.load(checkpoint_path, map_location='cpu')
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    '''simple docstring'''
    new_d = OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d
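# Example rename (hypothetical key): 'bert.bert.encoder.layer.0.attention...' becomes
# 'visual_bert.encoder.layer.0.attention...' via the ('bert.bert', 'visual_bert') pair.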
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    assert (
        checkpoint_path.split('/')[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''')
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : Any = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = 'xvjiarui/stable-diffusion-2-inpainting'
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)
        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)
        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : Dict = 16
__lowercase : List[Any] = 32
def bamb(x):  # bytes -> megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name='bert-base-cased', n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.', )
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = 'UNwant\u00E9d,running'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])
    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ['a', "'", 'll', '!', '!', 'to', '?', "'", 'd', 'of', ',', 'can', "'", 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: Optional[int] = self.get_tokenizer()
_A: int = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
_A: Optional[int] = tokenizer.encode('''sequence builders''' , add_special_tokens=__a )
_A: Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__a )
_A: List[str] = tokenizer.build_inputs_with_special_tokens(__a )
_A: Any = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A: str = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_A: Dict = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_A: Optional[int] = tokenizer_r.encode_plus(
__a , return_attention_mask=__a , return_token_type_ids=__a , return_offsets_mapping=__a , add_special_tokens=__a , )
_A: Optional[int] = tokenizer_r.do_lower_case if hasattr(__a , '''do_lower_case''' ) else False
_A: Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: Union[str, Any] = ['的', '人', '有']
_A: List[str] = ''.join(__a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_A: Any = True
_A: Any = self.tokenizer_class.from_pretrained(__a , **__a )
_A: Any = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_A: Tuple = tokenizer_p.encode(__a , add_special_tokens=__a )
_A: Union[str, Any] = tokenizer_r.encode(__a , add_special_tokens=__a )
_A: Dict = tokenizer_r.convert_ids_to_tokens(__a )
_A: Optional[Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
_A: Union[str, Any] = False
_A: List[str] = self.rust_tokenizer_class.from_pretrained(__a , **__a )
_A: Optional[int] = self.tokenizer_class.from_pretrained(__a , **__a )
_A: List[str] = tokenizer_r.encode(__a , add_special_tokens=__a )
_A: List[str] = tokenizer_p.encode(__a , add_special_tokens=__a )
_A: str = tokenizer_r.convert_ids_to_tokens(__a )
_A: Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__a )
# it is expected that only the first Chinese character is not preceded by "##".
_A: Optional[Any] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__a )
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , __a )
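# Illustrative companion to the WordpieceTokenizer tests above: a minimal
# re-implementation of greedy longest-match-first sub-word lookup. This is a
# sketch, not the transformers source; the helper name and vocab are made up
# to mirror the test fixture.
def _greedy_wordpiece(word, vocab, unk_token='[UNK]'):
    tokens, start = [], 0
    while start < len(word):
        end, piece = len(word), None
        while start < end:
            candidate = word[start:end] if start == 0 else '##' + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            return [unk_token]  # any unmatchable span maps the whole word to [UNK]
        tokens.append(piece)
        start = end
    return tokens
assert _greedy_wordpiece('unwanted', {'un', '##want', '##ed'}) == ['un', '##want', '##ed']
assert _greedy_wordpiece('unwantedX', {'un', '##want', '##ed'}) == ['[UNK]']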
| 121 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowercase : List[Any] = 'bart'
__lowercase : Union[str, Any] = True
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
if LOAD_DENSE_INDEX:
__a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
__a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
__a : Optional[int] = qar_model.eval()
else:
__a , __a : str = (None, None)
if MODEL_TYPE == "bart":
__a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
__a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
__a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
__a : str = sas_model.eval()
else:
__a , __a : Tuple = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
if LOAD_DENSE_INDEX:
__a : Optional[Any] = faiss.StandardGpuResources()
__a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
__a : int = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
__a : int = faiss.IndexFlatIP(128 )
__a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
__a , __a : str = (None, None)
__a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
__a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' )
__a : Dict = elia['train_eli5']
__a : Optional[int] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
__a : str = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
__lowercase , __lowercase , __lowercase : Any = load_indexes()
__lowercase , __lowercase , __lowercase , __lowercase : Dict = load_models()
__lowercase , __lowercase : int = load_train_data()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str]=10 ):
__a : Optional[int] = embed_questions_for_retrieval([question] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a , __a : Union[str, Any] = eli5_train_q_index.search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = [elia_train[int(i )] for i in I[0]]
return nn_examples
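# Self-contained sketch of the max-inner-product search pattern used in the
# function above, on random vectors instead of real question embeddings.
# `faiss.IndexFlatIP`, `.add` and `.search` are the actual faiss APIs; the
# helper name and toy data are made up for illustration.
def _mips_demo(d=128, n=1_000, k=10):
    xb = np.random.randn(n, d).astype('float32')  # database representations
    xq = np.random.randn(1, d).astype('float32')  # one query representation
    demo_index = faiss.IndexFlatIP(d)
    demo_index.add(xb)
    scores, ids = demo_index.search(xq, k)  # (1, k) inner products and row ids
    return scores, ids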
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str="wiki40b" , _SCREAMING_SNAKE_CASE : List[str]="dense" , _SCREAMING_SNAKE_CASE : Any=10 ):
if source == "none":
__a , __a : Any = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a : str = query_qa_dense_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a , __a : Union[str, Any] = query_es_index(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index_name='english_wiki40b_snippets_100w' , n_results=_SCREAMING_SNAKE_CASE , )
__a : Dict = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__a : Any = 'question: {} context: {}'.format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None),
} )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict=64 , _SCREAMING_SNAKE_CASE : Dict=256 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Tuple=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.9_5 , _SCREAMING_SNAKE_CASE : str=0.8 ):
with torch.no_grad():
__a : Union[str, Any] = qa_sas_generate(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : Dict = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowercase : str = 'wiki40b'
__lowercase : List[Any] = 'dense'
__lowercase : Dict = 'beam'
__lowercase : Optional[int] = 2
__lowercase : List[str] = 64
__lowercase : Tuple = 2_56
__lowercase : List[str] = None
__lowercase : Tuple = None
__lowercase : List[Any] = st.sidebar.checkbox('Generation options')
if generate_options:
__lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
__lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
__lowercase : Tuple = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__lowercase : int = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase : Dict = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowercase : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowercase : List[str] = None
# start main text
__lowercase : int = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
__lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase : Any = st.text_input('Enter your question here:', '')
else:
__lowercase : Any = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase , __lowercase : Optional[int] = make_support(question, source=wiki_source, method='dense', n_results=10)
__lowercase , __lowercase : List[Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
__lowercase : Optional[int] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase : str = support_list[:10]
__lowercase : Optional[int] = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
__lowercase , __lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowercase , __lowercase : int = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
__lowercase : str = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
__lowercase : Any = res[1].strip()
if sec_titles == "":
__lowercase : List[str] = '[{}]({})'.format(res[0], wiki_url)
else:
__lowercase : Union[str, Any] = sec_titles.split(' & ')
__lowercase : str = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase : str = find_nearest_training(question)
__lowercase : Optional[int] = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
__lowercase : Any = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
__lowercase : List[Any] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
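# Standalone restatement of the "mixed" retrieval merge above: interleave the
# dense and sparse result lists, drop duplicates, keep the top n. The function
# name is illustrative and not part of the original app.
def merge_support_lists(support_list_dense, support_list_sparse, n_results=10):
    merged = []
    for res_d, res_s in zip(support_list_dense, support_list_sparse):
        if tuple(res_d) not in merged:
            merged.append(tuple(res_d))
        if tuple(res_s) not in merged:
            merged.append(tuple(res_s))
    return merged[:n_results]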
| 27 | 0 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a_ = logging.get_logger(__name__)
class _lowercase ( lowerCAmelCase_ ):
lowercase = ['input_values', 'padding_mask']
def __init__( self : List[str] , snake_case : List[str] = 1 , snake_case : List[str] = 2_4_0_0_0 , snake_case : Optional[int] = 0.0 , snake_case : Any = None , snake_case : Union[str, Any] = None , **snake_case : Tuple , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(feature_size=__a , sampling_rate=__a , padding_value=__a , **__a )
UpperCamelCase_ : Dict = chunk_length_s
UpperCamelCase_ : Any = overlap
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Any:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
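# Worked example for the two properties above (illustrative values): with
# chunk_length_s = 1.0, sampling_rate = 24_000 and overlap = 0.25,
# chunk_length = int(1.0 * 24_000) = 24_000 samples and
# chunk_stride = max(1, int((1.0 - 0.25) * 24_000)) = 18_000 samples,
# i.e. consecutive chunks share 6_000 samples.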
def __call__( self : List[str] , snake_case : Optional[Any] , snake_case : Optional[int] = None , snake_case : Any = False , snake_case : List[Any] = None , snake_case : Union[str, Any] = None , snake_case : Union[str, Any] = None , ) -> Any:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Tuple = bool(
isinstance(__a , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
UpperCamelCase_ : Tuple = [np.asarray(audio , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__a , np.ndarray ):
UpperCamelCase_ : str = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
UpperCamelCase_ : List[Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase_ : str = [np.asarray(__a ).T]
# verify inputs are valid
for idx, example in enumerate(__a ):
if example.ndim > 2:
raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels" )
UpperCamelCase_ : Dict = None
UpperCamelCase_ : Any = BatchFeature({'input_values': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCamelCase_ : List[Any] = min(array.shape[0] for array in raw_audio )
UpperCamelCase_ : Any = int(np.floor(max_length / self.chunk_stride ) )
UpperCamelCase_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCamelCase_ : Optional[int] = max(array.shape[0] for array in raw_audio )
UpperCamelCase_ : List[str] = int(np.ceil(max_length / self.chunk_stride ) )
UpperCamelCase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCamelCase_ : Optional[Any] = 'max_length'
else:
UpperCamelCase_ : Any = input_values
# normal padding on batch
if padded_inputs is None:
UpperCamelCase_ : Tuple = self.pad(
__a , max_length=__a , truncation=__a , padding=__a , return_attention_mask=__a , )
if padding:
UpperCamelCase_ : Union[str, Any] = padded_inputs.pop('attention_mask' )
UpperCamelCase_ : Dict = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
UpperCamelCase_ : str = example[..., None]
input_values.append(example.T )
UpperCamelCase_ : List[str] = input_values
if return_tensors is not None:
UpperCamelCase_ : str = padded_inputs.convert_to_tensors(__a )
return padded_inputs
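# Numeric sketch of the truncation/padding length math in `__call__` above,
# assuming chunk_length = 24_000 and chunk_stride = 18_000 (illustrative values):
# - truncation with raw lengths [30_000, 50_000]: max_length = 30_000,
#   nb_step = floor(30_000 / 18_000) = 1, so max_length = (1 - 1) * 18_000 + 24_000
#   = 24_000 and every example is cut to exactly one chunk;
# - padding with the same inputs: max_length = 50_000,
#   nb_step = ceil(50_000 / 18_000) = 3, so max_length = (3 - 1) * 18_000 + 24_000
#   = 60_000 and every example is padded to a whole number of overlapping chunks.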
| 175 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : List[Any] = torch.device('cpu')
def lowerCamelCase ():
__a : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__a : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Union[str, Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : int = dct.pop(_SCREAMING_SNAKE_CASE )
__a : Tuple = val
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
__a : Dict = []
for k in state_dict.keys():
__a : List[Any] = k
if ".pwconv" in k:
__a : List[Any] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
__a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
__a : Optional[int] = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
__a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
__a : Union[str, Any] = k_new.split('.' )
if ls[2].isdigit():
__a : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
__a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
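# Walk-through of create_rename_keys on one fabricated checkpoint key (the key
# is invented for illustration; the substitutions are the ones applied above):
#   k = "network.0.1.pwconv.weight"
#   ".pwconv" -> ".point_wise_conv" gives "network.0.1.point_wise_conv.weight"
#   "network" is present and ls[2] == "1" is a digit, so the final key is
#   "swiftformer.encoder.network.0.blocks.1.point_wise_conv.weight"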
@torch.no_grad()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
__a : Union[str, Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__a : List[str] = 1_000
__a : Tuple = 'huggingface/label-files'
__a : str = 'imagenet-1k-id2label.json'
__a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__a : Optional[int] = {int(k ): v for k, v in idalabel.items()}
__a : Any = idalabel
__a : str = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__a : Dict = [3, 3, 6, 4]
__a : int = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__a : Dict = [3, 3, 9, 6]
__a : List[str] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__a : Dict = [4, 3, 10, 5]
__a : Optional[int] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__a : Tuple = [4, 4, 12, 6]
__a : Dict = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
__a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE )
else:
__a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
__a : Optional[Any] = checkpoint
__a : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , rename_key_src , rename_key_dest )
# load HuggingFace model
__a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# prepare test inputs
__a : Tuple = prepare_img()
__a : str = ViTImageProcessor.from_pretrained('preprocessor_config' )
__a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# compare outputs from both models
__a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE )
__a : Dict = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
__lowercase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Tuple:
lowercase_ : int = int(number**0.5 )
return number == sq * sq
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Any:
lowercase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowercase_ : int = x_den * y_den * z_den
lowercase_ : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
top //= hcf
bottom //= hcf
return top, bottom
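# Worked example: adding 1/2 + 1/3 + 1/6 with the routine above gives
# top = 1*3*6 + 1*2*6 + 1*2*3 = 36 and bottom = 2*3*6 = 36; gcd(36, 36) = 36,
# so the reduced result is (1, 1), i.e. the sum is exactly 1.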
def lowerCamelCase ( UpperCAmelCase__ : int = 35 ) -> List[str]:
lowercase_ : set = set()
lowercase_ : int
lowercase_ : Fraction = Fraction(0 )
lowercase_ : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowercase_ : Union[str, Any] = x_num * y_den + x_den * y_num
lowercase_ : Optional[Any] = x_den * y_den
lowercase_ : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : Any = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
lowercase_ : Optional[int] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowercase_ : Union[str, Any] = x_den * x_den * y_den * y_den
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
lowercase_ : List[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
lowercase_ : Any = int(sqrt(_SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[int] = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : List[Any] = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=-1
lowercase_ : int = x_num * y_num
lowercase_ : Optional[Any] = x_den * y_num + x_num * y_den
lowercase_ : Tuple = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : Any = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
# n=2
lowercase_ : List[Any] = x_num * x_num * y_num * y_num
lowercase_ : List[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_SCREAMING_SNAKE_CASE ) and is_sq(_SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = int(sqrt(_SCREAMING_SNAKE_CASE ) )
lowercase_ : int = gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : List[str] = add_three(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
unique_s.add(_SCREAMING_SNAKE_CASE )
for num, den in unique_s:
total += Fraction(num , den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 239 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __UpperCamelCase ( lowerCAmelCase_ ):
A_ = "umt5"
A_ = ["past_key_values"]
def __init__( self , __a=25_0112 , __a=512 , __a=64 , __a=1024 , __a=8 , __a=None , __a=6 , __a=32 , __a=128 , __a=0.1 , __a=1E-6 , __a=1.0 , __a="gated-gelu" , __a=True , __a=True , __a="T5Tokenizer" , __a=True , __a=0 , __a=1 , __a=0 , **__a , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
__a : Any = vocab_size
__a : Any = d_model
__a : str = d_kv
__a : Dict = d_ff
__a : Union[str, Any] = num_layers
__a : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__a : Optional[int] = num_heads
__a : Tuple = relative_attention_num_buckets
__a : Optional[Any] = relative_attention_max_distance
__a : Optional[int] = dropout_rate
__a : List[Any] = layer_norm_epsilon
__a : int = initializer_factor
__a : Union[str, Any] = feed_forward_proj
__a : Any = use_cache
__a : List[Any] = self.feed_forward_proj.split('-' )
__a : Dict = act_info[-1]
__a : Dict = act_info[0] == 'gated'
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__a : Optional[int] = 'gelu_new'
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.d_model
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_heads
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return self.num_layers
class __UpperCamelCase ( lowerCAmelCase_ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__a : Dict = 'past_encoder_sequence + sequence'
__a : Tuple = {0: 'batch'}
__a : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
__a : int = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 13
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return 5E-4
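# Standalone restatement of the `feed_forward_proj` parsing in __init__ above.
# The helper name is illustrative and not part of the transformers API.
def _parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split('-')
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == 'gated'
    if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:
        raise ValueError(f'`feed_forward_proj`: {feed_forward_proj} is not valid.')
    if feed_forward_proj == 'gated-gelu':
        dense_act_fn = 'gelu_new'
    return dense_act_fn, is_gated_act
assert _parse_feed_forward_proj('gated-gelu') == ('gelu_new', True)
assert _parse_feed_forward_proj('relu') == ('relu', False)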
| 27 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
SCREAMING_SNAKE_CASE__ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : tuple , _UpperCamelCase : Path , _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str]=False , ) -> str:
"""simple docstring"""
output_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , use_external_data_format=_SCREAMING_SNAKE_CASE , enable_onnx_checker=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
else:
export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , f=output_path.as_posix() , input_names=_SCREAMING_SNAKE_CASE , output_names=_SCREAMING_SNAKE_CASE , dynamic_axes=_SCREAMING_SNAKE_CASE , do_constant_folding=_SCREAMING_SNAKE_CASE , opset_version=_SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : bool = False ) -> Dict:
"""simple docstring"""
snake_case = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
snake_case = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
snake_case = 'cpu'
snake_case = Path(_SCREAMING_SNAKE_CASE )
# VAE DECODER
snake_case = AutoencoderKL.from_pretrained(model_path + '/vae' )
snake_case = vae_decoder.config.latent_channels
# forward only through the decoder part
snake_case = vae_decoder.decode
onnx_export(
_SCREAMING_SNAKE_CASE , model_args=(
torch.randn(1 , _SCREAMING_SNAKE_CASE , 2_5 , 2_5 ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_SCREAMING_SNAKE_CASE , )
del vae_decoder
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 150 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus" ):
__a : List[Any] = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
__a : Union[str, Any] = soup.findAll('h1' )
__a : int = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 27 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase ={
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =['ConvNextFeatureExtractor']
_lowerCamelCase =['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
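# Simplified sketch of the lazy-import idea behind `_LazyModule` (PEP 562
# module-level __getattr__). Illustrative only, not the transformers implementation:
# import importlib
# _import_structure = {"submodule": ["public_symbol"]}
# def __getattr__(name):
#     for module_name, symbols in _import_structure.items():
#         if name in symbols:
#             module = importlib.import_module(f".{module_name}", __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")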
| 287 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , 'embed_dim' ) )
self.parent.assertTrue(hasattr(__a , 'num_heads' ) )
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : Optional[int] = image_size
__a : List[str] = patch_sizes
__a : str = patch_stride
__a : Any = patch_padding
__a : Dict = is_training
__a : Union[str, Any] = use_labels
__a : Dict = num_labels
__a : List[Any] = num_channels
__a : Any = embed_dim
__a : int = num_heads
__a : Optional[int] = stride_kv
__a : Dict = depth
__a : List[str] = cls_token
__a : List[Any] = attention_drop_rate
__a : Tuple = initializer_range
__a : int = layer_norm_eps
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
# create a random int32 tensor of given shape
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : str = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = TFCvtModel(config=__a )
__a : Dict = model(__a , training=__a )
__a : Any = (self.image_size, self.image_size)
__a , __a : Dict = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
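# The expected spatial sizes follow the standard convolution output formula,
# floor((size + 2 * padding - kernel) / stride + 1): e.g. with image_size = 64,
# patch_sizes[0] = 7, patch_stride[0] = 4 and patch_padding[0] = 2, the first
# stage yields floor((64 + 4 - 7) / 4 + 1) = floor(16.25) = 16.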
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = self.num_labels
__a : Optional[int] = TFCvtForImageClassification(__a )
__a : Dict = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A_ = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtModelTester(self )
__a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(__a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : List[str] = model_class(__a )
__a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__a : Any = outputs.hidden_states
__a : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[Any] = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = TFCvtModel.from_pretrained(model_name )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a : Tuple = self.default_image_processor
__a : Any = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' )
# forward pass
__a : Any = model(**__a )
# verify the logits
__a : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , ) -> Optional[int]:
_a = parent
_a = 13
_a = 7
_a = True
_a = True
_a = False
_a = True
_a = 99
_a = 32
_a = 2
_a = 4
_a = 37
_a = 'gelu'
_a = 0.1
_a = 0.1
_a = 512
_a = 16
_a = 2
_a = 0.02
_a = 3
_a = 4
_a = None
def _UpperCAmelCase ( self ) -> Dict:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
_a = TFDistilBertModel(config=__a )
_a = {'input_ids': input_ids, 'attention_mask': input_mask}
_a = model(__a )
_a = [input_ids, input_mask]
_a = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
_a = TFDistilBertForMaskedLM(config=__a )
_a = {'input_ids': input_ids, 'attention_mask': input_mask}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
_a = TFDistilBertForQuestionAnswering(config=__a )
_a = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
_a = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
_a = self.num_labels
_a = TFDistilBertForSequenceClassification(__a )
_a = {'input_ids': input_ids, 'attention_mask': input_mask}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
_a = self.num_choices
_a = TFDistilBertForMultipleChoice(__a )
_a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_a = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
_a = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
_a = self.num_labels
_a = TFDistilBertForTokenClassification(__a )
_a = {'input_ids': input_ids, 'attention_mask': input_mask}
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = self.prepare_config_and_inputs()
(_a , _a , _a , _a , _a , _a) = config_and_inputs
_a = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A_ : int = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Any = False
A_ : Tuple = False
def _UpperCAmelCase ( self ) -> List[str]:
_a = TFDistilBertModelTester(self )
_a = ConfigTester(self , config_class=__a , dim=37 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
    def test_distilbert_model ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def test_for_masked_lm ( self ) -> List[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def test_for_multiple_choice ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    def test_for_token_classification ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ) -> Tuple:
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm ( self ) -> Tuple:
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 ) | 320 |
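# Hedged sketch of the shape-check pattern the tester above follows: build a
# model, call it on dummy inputs, and assert only on output shapes. The tiny
# Keras model here is a stand-in, not TFDistilBert.
import unittest
import tensorflow as tf

class TinyShapeTest(unittest.TestCase):
    def test_logits_shape(self):
        batch_size, seq_length, num_labels = 2, 7, 3
        model = tf.keras.Sequential([tf.keras.layers.Dense(num_labels)])
        dummy_inputs = tf.zeros((batch_size, seq_length, 16))
        logits = model(dummy_inputs)
        self.assertEqual(logits.shape, (batch_size, seq_length, num_labels))

if __name__ == "__main__":
    unittest.main()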
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively (hf_pointer : Tuple , key : Union[str, Any] , value : Any , full_name : List[str] , weight_type : Dict ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca (fairseq_model : Dict , hf_model : Optional[Any] ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif name.split('.' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer (full_name : Any , value : Dict , feature_extractor : Optional[int] , unused_weights : List[Any] , use_group_norm : Optional[Any] ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb (emb : int ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict (dict_path : Any ):
    with open(dict_path , 'r' , encoding='utf-8' ) as f:
        lines = f.readlines()
    words = [line.split(' ' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint (checkpoint_path : List[Any] , pytorch_dump_folder_path : List[Any] , dict_path : str , encoder_config_path : Any , decoder_config_path : str , vocab_size : Any , num_decoder_layers : Optional[int] , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('embed_out' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) , 'w' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowercase : Dict = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 27 | 0 |
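# Hedged sketch of the dotted-key weight copy that set_recursively implements
# above: walk "a.b.c" with getattr, then assign the tensor to the leaf's
# .data. The tiny Sequential model is a stand-in, not a wav2vec2 checkpoint.
import torch
from torch import nn

def copy_by_dotted_key(root: nn.Module, dotted_key: str, value: torch.Tensor) -> None:
    *path, leaf = dotted_key.split(".")
    for part in path:
        root = getattr(root, part)  # descend one attribute per path segment
    getattr(root, leaf).data = value  # e.g. leaf == "weight" or "bias"

model = nn.Sequential(nn.Linear(4, 4))
copy_by_dotted_key(model, "0.bias", torch.ones(4))
assert torch.equal(model[0].bias.data, torch.ones(4))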
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size (self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim (self ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a (self ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim (self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim (self ):
        """simple docstring"""
        return 100
    @property
    def dummy_unet (self ):
"""simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq (self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components (self ):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 1000,
            'beta_schedule': 'linear',
            'beta_start': 0.0_0085,
            'beta_end': 0.012,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs (self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((256, 256) )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_controlnet_img2img (self ):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img (self ):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        init_image = init_image.resize((512, 512) )
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = 'A robot, 4k photo'
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt="" , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 227 |
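# Sketch of the seeded-input pattern the pipeline test above relies on: every
# random tensor is drawn from an explicitly seeded generator so repeated runs
# produce identical inputs. The shape is an arbitrary stand-in.
import torch

def get_dummy_latents(seed: int = 0, shape=(1, 4, 8, 8)) -> torch.Tensor:
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(shape, generator=generator)

assert torch.equal(get_dummy_latents(0), get_dummy_latents(0))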
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock (*_SCREAMING_SNAKE_CASE : int ):
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_SCREAMING_SNAKE_CASE )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
__lowercase : Dict = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__lowercase : Tuple = torch.device('cuda', local_rank)
__lowercase : Optional[int] = socket.gethostname()
__lowercase : List[str] = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__lowercase : str = dist.get_rank()
__lowercase : Union[str, Any] = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 27 | 0 |
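# Minimal single-process variant of the diagnostic above, using the CPU-only
# "gloo" backend so it runs without GPUs or a launcher. The address/port are
# assumptions; the port just has to be free on the local machine.
import torch
import torch.distributed as dist

dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1)
t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)
assert t.item() == 1.0  # world_size == 1, so the sum is unchanged
dist.destroy_process_group()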
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__( self : Optional[Any] , image_processor : List[Any]=None , tokenizer : int=None , **kwargs : Any ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self : Tuple , images : int = None , text : Union[str, Any] = None , add_special_tokens : Optional[Any] = True , padding : Tuple = False , truncation : Optional[int] = False , max_length : List[str] = None , stride : Tuple = 0 , pad_to_multiple_of : Optional[int] = None , return_image_mask : Optional[int] = None , return_codebook_pixels : Optional[int] = None , return_token_type_ids : List[Any] = None , return_attention_mask : Dict = None , return_overflowing_tokens : int = False , return_special_tokens_mask : str = False , return_offsets_mapping : Any = False , return_length : Dict = False , verbose : int = True , return_tensors : str = None , **kwargs : List[Any] , ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode ( self : Optional[Any] , *args : int , **kwargs : str ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self : Dict , *args : Any , **kwargs : Optional[int] ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names ( self : Dict ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class ( self : Dict ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor ( self : Optional[int] ):
        '''simple docstring'''
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor | 212 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
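# Sketch of the lazy-import machinery above using plain PEP 562: a
# module-level __getattr__ resolves names through an import table only on
# first access. The json mapping is a stand-in table, not the data2vec one.
import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)

print(__getattr__("dumps")({"lazy": True}))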
from importlib import import_module
from .logging import get_logger
lowerCamelCase = get_logger(__name__)
class _PatchedModuleObj :
'''simple docstring'''
    def __init__( self, module, attrs=None ) -> List[str]:
        """simple docstring"""
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('''__''' ):
                    setattr(self, key, getattr(module, key ) )
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj ) else module
class patch_submodule :
    '''simple docstring'''
    _active_patches = []
    def __init__( self, obj, target, new, attrs=None ) -> List[str]:
        """simple docstring"""
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('''.''' )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ) -> List[Any]:
        """simple docstring"""
        *submodules, target_attr = self.target.split('''.''' )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module('''.'''.join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs ) )
                    patched = getattr(self.obj, attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None ), attrs=self.attrs ) )
                        patched = getattr(patched, key )
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('''.'''.join(submodules ) ), target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr ) is attr_value:
                    self.original[attr] = getattr(self.obj, attr )
                    setattr(self.obj, attr, self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()['''__builtins__'''][target_attr]
            setattr(self.obj, target_attr, self.new )
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
    def __exit__( self, *exc_info ) -> List[str]:
        """simple docstring"""
        for attr in list(self.original ):
            setattr(self.obj, attr, self.original.pop(attr ) )
    def start( self ) -> List[str]:
        """simple docstring"""
        self.__enter__()
        self._active_patches.append(self )
    def stop( self ) -> str:
        """simple docstring"""
        try:
            self._active_patches.remove(self )
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 188 |
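# For comparison, a sketch of the same effect with the standard library:
# unittest.mock.patch.object swaps one attribute for the duration of a context
# manager and restores the original on exit, much like patch_submodule above.
import os
from unittest import mock

def fake_join(*parts):
    return "<patched>"

with mock.patch.object(os.path, "join", fake_join):
    assert os.path.join("a", "b") == "<patched>"
assert os.path.join("a", "b") != "<patched>"  # original restored on exit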
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
__lowercase : Tuple = pytest.mark.integration
REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None
UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'
REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq (test_case : Dict ):
    @wraps(test_case )
    def wrapper(self : int , metric_name : List[Any] ):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"' )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_if_metric_requires_transformers (test_case : Union[str, Any] ):
    @wraps(test_case )
    def wrapper(self : Dict , metric_name : Union[str, Any] ):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"' )
        else:
            test_case(self , metric_name )
    return wrapper
def skip_on_windows (test_case : Dict ):
    @wraps(test_case )
    def wrapper(self : Optional[int] , metric_name : Optional[Any] ):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"' )
        else:
            test_case(self , metric_name )
    return wrapper
def get_local_metric_names ():
    metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest ( parameterized.TestCase ):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
    def test_load_metric ( self , metric_name ):
        '''simple docstring'''
        __a : int = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , metric_name ) ).module_path )
        metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
        # check parameters
        parameters = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1] # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@slow
    def test_load_real_metric ( self , metric_name ):
        '''simple docstring'''
        __a : Tuple = '[...]'
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , metric_name ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
@contextmanager
    def patch_intensive_calls ( self , metric_name , module_name ):
        '''simple docstring'''
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
                yield
        else:
            yield
    @contextmanager
    def use_local_metrics ( self ):
        '''simple docstring'''
        def load_local_metric(metric_name , *args , **kwargs ):
            return load_metric(os.path.join('metrics' , metric_name ) , *args , **kwargs )
        with patch('datasets.load_metric' ) as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield
@classmethod
    def register_intensive_calls_patcher ( cls , metric_name ):
        '''simple docstring'''
        def wrapper(patcher ):
            patcher = contextmanager(patcher )
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher
        return wrapper
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def patch_bleurt (module_name : Dict ):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor
    tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags
    class MockedPredictor ( Predictor ):
        def predict ( self , input_dict ):
            '''simple docstring'''
            assert len(input_dict['input_ids'] ) == 2
            return np.array([1.03, 1.04] )
    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def patch_bertscore (module_name : str ):
    import torch
    def bert_cos_score_idf(model : List[str] , refs : str , *args : int , **kwargs : Optional[int] ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model' ), patch(
        'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def patch_comet (module_name : Dict ):
    def load_from_checkpoint(model_path : Optional[int] ):
        class Model :
            def predict ( self , data , *args , **kwargs ):
                '''simple docstring'''
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
        return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a model
    with patch('comet.download_model' ) as mock_download_model:
        mock_download_model.return_value = None
        with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme ():
    metric = load_metric(os.path.join('metrics' , 'seqeval' ) )
    wrong_scheme = 'ERROR'
    error_message = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 27 | 0 |
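# Minimal sketch of the doctest-driven check used above: doctest.testmod
# executes the examples embedded in docstrings and reports failed/attempted
# counts, which the test then asserts on.
import doctest

def add(a, b):
    """
    >>> add(2, 3)
    5
    """
    return a + b

results = doctest.testmod(verbose=False)
assert results.failed == 0 and results.attempted >= 1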
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase ( unittest.TestCase ):
    def setUp ( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer ( self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer ( self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor ( self , **kwargs ):
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown ( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs ( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default ( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ViTImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features ( self ):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_visual_prompt ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input , visual_prompt=visual_prompt_input )
        self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 285 |
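# Sketch of the save/reload round-trip pattern the processor tests above
# follow: serialize a config to a temp dir, load it back, and compare. The
# file name mirrors the one used by image processors.
import json, os, tempfile

config = {"do_resize": True, "size": 20}
with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "preprocessor_config.json")
    with open(path, "w", encoding="utf-8") as fp:
        json.dump(config, fp)
    with open(path, encoding="utf-8") as fp:
        reloaded = json.load(fp)
assert reloaded == config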
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_KWARGS_DESCRIPTION = __lowercase # the long argument docstring defined just above
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '' , x ) for x in predictions] )
                references = np.array([re.sub(s , '' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('' , '' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('' , '' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 27 | 0 |
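# Standalone sketch of the exact-match computation above: optional case
# folding and punctuation stripping, then a mean over elementwise string
# equality. The two-example call at the end is illustrative only.
import string
import numpy as np

def exact_match(preds, refs, ignore_case=True, ignore_punctuation=True):
    preds, refs = np.asarray(preds), np.asarray(refs)
    if ignore_case:
        preds, refs = np.char.lower(preds), np.char.lower(refs)
    if ignore_punctuation:
        table = str.maketrans("", "", string.punctuation)
        preds = np.char.translate(preds, table=table)
        refs = np.char.translate(refs, table=table)
    return float(np.mean(preds == refs) * 100)

print(exact_match(["Cat?", "dog"], ["cat", "dog"]))  # 100.0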
"""simple docstring"""
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor ( state : Tuple ) -> Dict:
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather ( state : Tuple ) -> Dict:
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object ( state : Tuple ) -> List[str]:
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, f"""{gathered_obj}, {len(gathered_obj )} != {state.num_processes}"""
    assert gathered_obj == list(range(state.num_processes ) ), f"""{gathered_obj} != {list(range(state.num_processes ) )}"""
def test_broadcast ( state : Dict ) -> Dict:
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes ( state : str ) -> List[Any]:
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum ( state : int ) -> Tuple:
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , "sum" )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f"""{reduced_tensor} != {truth_tensor}"""
def test_reduce_mean ( state : int ) -> Tuple:
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , "mean" )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f"""{reduced_tensor} != {truth_tensor}"""
def _mp_fn ( index : Any ) -> int:
    # For xla_spawn (TPUs)
    main()
def main ( ) -> Dict:
    state = PartialState()
    state.print(f"""State: {state}""" )
    state.print("testing gather" )
    test_gather(state )
    state.print("testing gather_object" )
    test_gather_object(state )
    state.print("testing broadcast" )
    test_broadcast(state )
    state.print("testing pad_across_processes" )
    test_pad_across_processes(state )
    state.print("testing reduce_sum" )
    test_reduce_sum(state )
    state.print("testing reduce_mean" )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
| 202 |
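# Single-process sketch of what gather does across ranks in the script above:
# each rank i holds the tensor [i*n+1 .. i*n+n]; gather concatenates them into
# range(1, n*n + 1). Here the per-rank tensors are simulated in one process.
import torch

num_processes = 2
per_rank = [torch.arange(num_processes) + 1.0 + num_processes * i for i in range(num_processes)]
gathered = torch.cat(per_rank)
assert gathered.tolist() == list(range(1, num_processes**2 + 1))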
'''simple docstring'''
import os
import sys
__lowercase : List[Any] = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config (*args : List[str] , **kwargs : List[Any] ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer (*args : str , **kwargs : Any ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model (*args : int , **kwargs : Union[str, Any] ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM (*args : List[Any] , **kwargs : Optional[int] ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM (*args : Union[str, Any] , **kwargs : List[Any] ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification (*args : Optional[Any] , **kwargs : Any ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering (*args : Any , **kwargs : List[str] ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 27 | 0 |
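# Sketch of the docstring-forwarding trick behind add_start_docstrings above:
# a decorator that prepends another object's __doc__ onto the wrapped
# function. AutoThing is a hypothetical stand-in class.
def add_start_docstrings(*docs):
    def decorator(fn):
        fn.__doc__ = "".join(d or "" for d in docs) + (fn.__doc__ or "")
        return fn
    return decorator

class AutoThing:
    """Loads a thing by name."""

@add_start_docstrings(AutoThing.__doc__)
def thing(name):
    return name

assert thing.__doc__.startswith("Loads a thing")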
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase__ : Dict = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption ( parser ) -> Optional[int]:
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ) -> Any:
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 121 |
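# Hedged sketch of what a shared pytest_addoption helper typically registers;
# the flag name mirrors the --make-reports option read above, but the real
# body lives in diffusers.utils.testing_utils, so treat this as illustrative.
def pytest_addoption(parser):
    parser.addoption(
        "--make-reports",
        action="store",
        default=False,
        help="generate report files; the value is used as the report-name prefix",
    )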
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = inspect.getfile(accelerate.test_utils )
__a : List[str] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__a : Union[str, Any] = test_metrics
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
def __UpperCAmelCase ( self ):
'''simple docstring'''
print(f"""Found {torch.cuda.device_count()} devices.""" )
__a : List[Any] = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__a , env=os.environ.copy() )
| 27 | 0 |
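# Minimal sketch of the "launch a script and check it exits cleanly" pattern
# behind execute_subprocess_async above, using the standard library directly.
import subprocess
import sys

cmd = [sys.executable, "-c", "print('ok')"]
completed = subprocess.run(cmd, capture_output=True, text=True, check=True)
assert completed.stdout.strip() == "ok"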
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mt5'] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mt5'] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_mt5'] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['__file__'],
_import_structure,
extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
module_spec=__spec__,
)
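# Hedged aside (not part of the original file): a miniature of the lazy-import
# pattern `_LazyModule` implements above. Submodules are imported only when one
# of their exported names is first accessed, keeping the top-level import cheap.
# `_LazyModuleSketch` below is illustrative, not the real implementation.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr in self._class_to_module:
            module = importlib.import_module(f"{self.__name__}.{self._class_to_module[attr]}")
            value = getattr(module, attr)
            setattr(self, attr, value)  # cache so later lookups are plain attribute hits
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")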
| 175 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / 'file.csv'
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / 'malformed_file.csv'
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / 'csv_with_image.csv'
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / 'csv_with_label.csv'
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / 'csv_with_int_list.csv'
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, 'w') as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match='Error tokenizing data'):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records)


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding='utf-8') as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='utf-8', features=Features({'image': Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('image').type == Image()()
    generated_content = pa_table.to_pydict()['image']
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding='utf-8') as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='utf-8', features=Features({'label': ClassLabel(names=['good', 'bad'])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field('label').type == ClassLabel(names=['good', 'bad'])()
    generated_content = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad']).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding='utf-8', sep=',', converters={'int_list': lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field('int_list').type)
    generated_content = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
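if __name__ == "__main__":
    # Hedged aside (not part of the original test file): the converter behaviour
    # the last test exercises can be reproduced with plain pandas + pyarrow, since
    # the Csv builder delegates to pd.read_csv under the hood.
    import io

    import pandas as pd

    buf = io.StringIO('int_list\n1 2 3\n4 5 6\n7 8 9\n')
    df = pd.read_csv(buf, converters={'int_list': lambda x: [int(i) for i in x.split()]})
    table = pa.Table.from_pandas(df)
    assert pa.types.is_list(table.schema.field('int_list').type)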
| 27 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowercase : Tuple = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='toto', metadata={'help': 'help message'})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum(Enum):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = 'toto'

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = 'toto'

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={'help': 'help message'})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto', metadata={'help': 'help message'})
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={'help': 'help message'})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of two argument parsers.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != 'container'}
            yy = {k: v for k, v in vars(y).items() if k != 'container'}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('choices', None) and yy.get('choices', None):
                for expected_choice in yy['choices'] + xx['choices']:
                    self.assertEqual(xx['type'](expected_choice), yy['type'](expected_choice))
                del xx['type'], yy['type']
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument('--foo', type=int, required=True)
        expected.add_argument('--bar', type=float, required=True)
        expected.add_argument('--baz', type=str, required=True)
        expected.add_argument('--flag', type=string_to_bool, default=False, const=True, nargs='?')
        self.argparsersEqual(parser, expected)
        args = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : str = HfArgumentParser(__a )
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=__a )
expected.add_argument("""--baz""" , default="""toto""" , type=__a , help="""help message""" )
self.argparsersEqual(__a , __a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__a , default=__a , const=__a , nargs="""?""" )
expected.add_argument("""--baz""" , type=__a , default=__a , const=__a , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=__a , dest="""baz""" )
expected.add_argument("""--opt""" , type=__a , default=__a )
lowercase_ : str = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
lowercase_ : Dict = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
lowercase_ : List[Any] = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
lowercase_ : Tuple = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
lowercase_ : List[Any] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
lowercase_ : int = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(__a , Namespace(foo=__a , baz=__a , opt=__a ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = HfArgumentParser(__a )
lowercase_ : str = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(__a , __a )
lowercase_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowercase_ : Tuple = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase_ : List[str] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowercase_ : Tuple = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
lowercase_ : Any = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
@dataclass
class __magic_name__ :
UpperCamelCase__ = '''toto'''
lowercase_ : Dict = HfArgumentParser(__a )
lowercase_ : List[str] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(__a , __a )
lowercase_ : str = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
lowercase_ : Optional[int] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
lowercase_ : Any = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[Any] = HfArgumentParser(__a )
lowercase_ : Any = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=__a )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=__a )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__a )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=__a )
self.argparsersEqual(__a , __a )
lowercase_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(
__a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase_ : Tuple = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(__a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=__a , type=__a )
expected.add_argument("""--bar""" , default=__a , type=__a , help="""help message""" )
expected.add_argument("""--baz""" , default=__a , type=__a )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=__a )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=__a )
lowercase_ : Any = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__a )
for dataclass_type in dataclass_types:
lowercase_ : Tuple = HfArgumentParser(__a )
self.argparsersEqual(__a , __a )
lowercase_ : Tuple = parser.parse_args([] )
self.assertEqual(__a , Namespace(foo=__a , bar=__a , baz=__a , ces=[] , des=[] ) )
lowercase_ : Tuple = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(__a , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = HfArgumentParser(__a )
lowercase_ : str = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=__a , required=__a )
expected.add_argument("""--required_str""" , type=__a , required=__a )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=__a , )
self.argparsersEqual(__a , __a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : str = HfArgumentParser(__a )
lowercase_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__a , required=__a )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=__a , )
expected.add_argument("""--opt""" , type=__a , default=__a )
expected.add_argument("""--baz""" , default="""toto""" , type=__a , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__a )
self.argparsersEqual(__a , __a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = HfArgumentParser(__a )
lowercase_ : Optional[Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
lowercase_ : Optional[Any] = parser.parse_dict(__a )[0]
lowercase_ : str = BasicExample(**__a )
self.assertEqual(__a , __a )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Dict = HfArgumentParser(__a )
lowercase_ : str = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__a , parser.parse_dict , __a , allow_extra_keys=__a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Dict = HfArgumentParser(__a )
lowercase_ : Optional[int] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : List[str] = os.path.join(__a , """temp_json""" )
os.mkdir(__a )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(__a , __a )
lowercase_ : Tuple = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
lowercase_ : str = BasicExample(**__a )
self.assertEqual(__a , __a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[str] = HfArgumentParser(__a )
lowercase_ : Union[str, Any] = {
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase_ : List[str] = os.path.join(__a , """temp_yaml""" )
os.mkdir(__a )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(__a , __a )
lowercase_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
lowercase_ : Dict = BasicExample(**__a )
self.assertEqual(__a , __a )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Tuple = HfArgumentParser(__a )
self.assertIsNotNone(__a )
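if __name__ == "__main__":
    # Hedged usage sketch (not part of the original test file): the typical
    # HfArgumentParser flow that the tests above verify, end to end.
    sketch_parser = HfArgumentParser(BasicExample)
    (sketch_example,) = sketch_parser.parse_args_into_dataclasses(
        ['--foo', '12', '--bar', '3.14', '--baz', '42', '--flag', 'True'])
    assert sketch_example == BasicExample(foo=12, bar=3.14, baz='42', flag=True)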
| 239 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self):
        """Sum the allocated amount of each resource across all processes."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Resources still available: the claim vector minus what is currently allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self):
        """Per-process need: maximum claim minus currently allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each process's need list back to its original index."""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs):
        """Simulate the Banker's algorithm on the stored tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        """Properly align and print the stored data tables."""
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n')
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n')
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector))
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
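    # Hedged usage sketch (mirrors the original module's doctest): run the
    # simulation on the module-level test data defined above; describe=True makes
    # main() pretty-print the tables before scheduling.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)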
| 150 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)
        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model({'pixel_values': pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
            self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
            self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
            self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
            self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict['end_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict['labels'] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)
        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
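# Hedged aside (not part of the original test file): LayoutLMv3 expects one
# (x0, y0, x1, y1) box per text token, normalized to a 0-1000 grid, which is why
# the tester above draws bbox values with range_bbox=1000. `normalize_box` is a
# hypothetical helper name for illustration, not a transformers API.
def normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]


assert normalize_box((10, 20, 30, 40), width=200, height=400) == [50, 50, 150, 100]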
| 27 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
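# Quick illustration (hedged, not in the original file) of the kwargs convention
# parse_unknown_args handles: trailing "--key value" pairs become a str->str dict
# that is forwarded to the selected subcommand.
#
# parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp/ds"])
# -> {"num_proc": "4", "cache_dir": "/tmp/ds"}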
| 287 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the importable module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module object of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all classes in `test_file` whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes in `test_file` that declare a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect every model class covered by some test class in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class wired up by a test class's setUp, if any."""
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()
    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes in `test_file` that cover `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect the model tester classes in `test_file` associated with `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the model tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the collected class information succinct and easy to read."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
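if __name__ == "__main__":
    # Hedged usage sketch (not part of the original utility): inspect a model test
    # file from inside a transformers checkout; the path below is illustrative.
    bert_test_file = 'tests/models/bert/test_modeling_bert.py'
    print(to_json(get_test_to_tester_mapping(bert_test_file)))
    print(to_json(get_model_to_tester_mapping(bert_test_file)))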
| 27 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('eta', 0.0), ('num_inference_steps', 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
assert abs(result_mean.item() - 0.1941 ) < 1e-3 | 320 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
        PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert('RGB').resize((64, 64))
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
__a : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
__a : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
__a : Optional[int] = 'stabilityai/stable-diffusion-2-inpainting'
__a : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__a : Dict = 'Face of a yellow cat, high resolution, sitting on a park bench'
__a : Tuple = torch.manual_seed(0 )
__a : int = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
__a : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder='scheduler')
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type='np', )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
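    # Unit note (illustrative, not part of the original test):
    # torch.cuda.max_memory_allocated() reports bytes, which is why the 2.65 GB
    # bound above is written as 2.65 * 10**9. A tiny helper keeps similar
    # assertions readable:
    #
    #     def to_gb(num_bytes: int) -> float:
    #         return num_bytes / 10**9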
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
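# Minimal usage sketch (illustrative only; it mirrors the API defined above):
#
#     lst = LinkedList()
#     for value in (1, 2, 3):
#         lst.insert_tail(value)
#     assert str(lst) == "1->2->3"
#     lst.reverse()
#     assert str(lst) == "3->2->1"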
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests that the LinkedList works with objects.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
'''simple docstring'''
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input('Enter a location:').strip()
        if location:
            pprint(current_weather(location))
        else:
            break
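# Illustrative helper built on current_weather() above. The exact response
# schema depends on the OpenWeatherMap endpoint; "weather" and "main.temp"
# are standard fields of /weather, but treat this as an assumption.
def describe_current_weather(location: str) -> str:
    data = current_weather(location)
    description = data["weather"][0]["description"]
    # "main.temp" is reported in Kelvin unless other units are requested.
    kelvin = data["main"]["temp"]
    return f"{location}: {description}, {kelvin - 273.15:.1f} C"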
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
            return 0 if d0 > d1 else 1
        return 0

    def update(self, weights, sample, j, alpha):
        """Update the winning vector."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
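# Tiny sanity check for the class above (illustrative, not part of the
# original file). Note that because of the early `return` inside
# get_winner's loop, only the first component of the sample is compared.
def _winner_demo() -> None:
    som = SelfOrganizingMap()
    # i=0: d0 = (1 - 0)**2 = 1, d1 = (1 - 1)**2 = 0, and 1 > 0, so index 0 is returned.
    assert som.get_winner([[0.0, 0.0], [1.0, 1.0]], [1, 1]) == 0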
'''simple docstring'''
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1E-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
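# Shape sketch (illustrative, not part of the original file): given tokenized
# queries `W_query` and packed supports `W_supports` (with "sizes",
# "start_token_id" and "end_token_id" entries as consumed above), the model
# returns per-support-token probabilities of entity starts and ends:
#
#     model = FSNERModel()
#     p_starts, p_ends = model(W_query, W_supports)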
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size="auto"):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a__ =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a )
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, text_embeddings=None, **kwargs, ):
"""simple docstring"""
if isinstance(__a, __a ):
a__ =1
elif isinstance(__a, __a ):
a__ =len(__a )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__a )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a, __a ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__a )}.""" )
# get prompt text embeddings
a__ =self.tokenizer(
__a, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
a__ =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
a__ =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
a__ =text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
a__ =self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
a__ =text_embeddings.shape
a__ =text_embeddings.repeat(1, __a, 1 )
a__ =text_embeddings.view(bs_embed * num_images_per_prompt, __a, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
a__ =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
a__ =42
if negative_prompt is None:
a__ =['']
elif type(__a ) is not type(__a ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__a )} !="""
F""" {type(__a )}.""" )
elif isinstance(__a, __a ):
a__ =[negative_prompt]
elif batch_size != len(__a ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__a )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
a__ =negative_prompt
a__ =text_input_ids.shape[-1]
a__ =self.tokenizer(
__a, padding='''max_length''', max_length=__a, truncation=__a, return_tensors='''pt''', )
a__ =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
a__ =uncond_embeddings.shape[1]
a__ =uncond_embeddings.repeat(__a, __a, 1 )
a__ =uncond_embeddings.view(batch_size * num_images_per_prompt, __a, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
a__ =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
a__ =(batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
a__ =(batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
a__ =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
a__ =torch.randn(
__a, generator=__a, device='''cpu''', dtype=__a ).to(self.device )
a__ =torch.randn(__a, generator=__a, device='''cpu''', dtype=__a ).to(
self.device )
else:
a__ =torch.randn(
__a, generator=__a, device=self.device, dtype=__a )
a__ =torch.randn(__a, generator=__a, device=self.device, dtype=__a )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
a__ =latents_reference.to(self.device )
a__ =latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
a__ =(latents_shape[3] - latents_shape_reference[3]) // 2
a__ =(latents_shape[2] - latents_shape_reference[2]) // 2
a__ =latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
a__ =latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
a__ =0 if dx < 0 else dx
a__ =0 if dy < 0 else dy
a__ =max(-dx, 0 )
a__ =max(-dy, 0 )
# import pdb
# pdb.set_trace()
a__ =latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
a__ =self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
a__ =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
a__ ='eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
a__ ={}
if accepts_eta:
a__ =eta
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the latents if we are doing classifier free guidance
a__ =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
a__ =self.scheduler.scale_model_input(__a, __a )
# predict the noise residual
a__ =self.unet(__a, __a, encoder_hidden_states=__a ).sample
# perform guidance
if do_classifier_free_guidance:
a__ =noise_pred.chunk(2 )
a__ =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
a__ =self.scheduler.step(__a, __a, __a, **__a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a, __a, __a )
a__ =1 / 0.18215 * latents
a__ =self.vae.decode(__a ).sample
a__ =(image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
a__ =image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if self.safety_checker is not None:
a__ =self.feature_extractor(self.numpy_to_pil(__a ), return_tensors='''pt''' ).to(
self.device )
a__ =self.safety_checker(
images=__a, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
a__ =None
if output_type == "pil":
a__ =self.numpy_to_pil(__a )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__a, nsfw_content_detected=__a )
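# Illustrative usage sketch (assumption: this file is registered as the
# "seed_resize_stable_diffusion" community pipeline; adjust the identifiers
# to your setup):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="seed_resize_stable_diffusion",
#     )
#     image = pipe("an astronaut riding a horse", height=512, width=768).images[0]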
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
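# Worked example for add_three (illustrative): 1/2 + 1/3 + 1/6 reduces to 1/1,
# so the returned (numerator, denominator) pair is (1, 1):
# top = 1*3*6 + 1*2*6 + 1*2*3 = 36, bottom = 2*3*6 = 36, gcd = 36.
# >>> add_three(1, 2, 1, 3, 1, 6)
# (1, 1)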
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_UpperCAmelCase : Tuple = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
@property
    def gpu_provider(self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_stable_diffusion_inpaint_legacy(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1E-2
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_A : List[str] = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1E-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1E-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs, ):
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
    @classmethod
    def from_text_vision_configs(cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
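# Minimal construction sketch (illustrative; it mirrors the classes above):
#
#     text_cfg = BridgeTowerTextConfig()
#     vision_cfg = BridgeTowerVisionConfig()
#     cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     assert cfg.to_dict()["model_type"] == "bridgetower"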
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : Dict = 16
__lowercase : List[Any] = 32
def bamb(x: int) -> int:
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)))
        train_total_peak_memory[F"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.', )
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
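# Example invocation (illustrative; the file name is an assumption, and
# `accelerate launch` is the usual entry point for accelerate example scripts):
#
#   accelerate launch peak_memory_tracking.py \
#       --model_name_or_path bert-base-cased \
#       --output_dir outputs \
#       --n_train 320 --n_val 160 --num_epochs 1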
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(F"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
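# Hypothetical usage sketch (PipelineTool instances are callable; the
# checkpoint is downloaded and set up lazily on first use):
#
#     tool = TranslationTool()
#     print(tool("Bonjour, le monde !", src_lang="French", tgt_lang="English"))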
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        sas_model.load_state_dict(save_dict['model'])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='english_wiki40b_snippets_100w', n_results=n_results, )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None),
} )
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1_024, device='cuda:0', )[0]
    return (answer, support_list)
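# End-to-end sketch of the retrieve-then-generate flow defined above
# (illustrative arguments):
#
#     question_doc, support_list = make_support(
#         "Why is the sky blue?", source="wiki40b", method="dense", n_results=10
#     )
#     answer, _ = answer_question(question_doc, sas_model, sas_tokenizer)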
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : Dict = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowercase : str = 'wiki40b'
__lowercase : List[Any] = 'dense'
__lowercase : Dict = 'beam'
__lowercase : Optional[int] = 2
__lowercase : List[str] = 64
__lowercase : Tuple = 2_56
__lowercase : List[str] = None
__lowercase : Tuple = None
__lowercase : List[Any] = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder\'s output probabilities.\n    '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 27 | 0 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 175 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
rename_keys.append((k, k_new) )
return rename_keys
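# Illustrative mapping produced by the rules above (derived from the code, not exhaustive):
#   "network.0.0.dwconv.weight" -> "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight"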
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
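# The two asserts above act as a regression check: the converted model is only saved when its
# first five logits match the expected outputs for the chosen checkpoint to within 1e-3.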
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 27 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
    def test_attention_outputs(self):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    def test_dataset_conversion(self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
    def test_keras_fit(self):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("""mixed_float16""")
        tf.keras.mixed_precision.set_global_policy(policy)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 239 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=250112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs
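    # For example, with use_past=False the mapping above resolves to:
    #   {'input_ids': {0: 'batch', 1: 'encoder_sequence'},
    #    'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
    #    'decoder_input_ids': {0: 'batch', 1: 'decoder_sequence'},
    #    'decoder_attention_mask': {0: 'batch', 1: 'decoder_sequence'}}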
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5E-4
| 27 | 0 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is an empirically determined constant, usually in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')
    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
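# detect() returns a 3-channel copy of the image with corner pixels marked (channel 2 set to 255)
# together with a list of [x, y, response] triples, where response = det(M) - k * trace(M)^2.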
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 150 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 27 | 0 |
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
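# This memoised recursion solves Project Euler problem 191 ("Prize Strings"): it counts
# attendance strings of the given length with no three consecutive late days and at most
# one absence in total.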
if __name__ == "__main__":
print(solution())
| 287 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'embed_dim'))
        self.parent.assertTrue(hasattr(config, 'num_heads'))
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1E-1_2, is_training=True, use_labels=True, num_labels=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
    def test_attention_outputs(self):
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
    def test_model_common_attributes(self):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
    def test_dataset_conversion(self):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
    def test_keras_fit(self):
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy('mixed_float16')
        tf.keras.mixed_precision.set_global_policy(policy)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 27 | 0 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'''\[([^\]]+)\]''')
def get_indent(line):
    """Returns the indent in the first line of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split('''\n''')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ''' '''):
                current_block.append(lines[index])
                blocks.append('''\n'''.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('''\n'''.join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('''\n'''.join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` (function returning a string) to lowercase and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace('''_''', '''''')

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
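# Illustrative ordering (assuming the default identity key): constants first, then classes,
# then functions, each group sorted case-insensitively with underscores ignored, e.g.
#   ["CONFIG_MAPPING", "AutoModel", "load_checkpoint"]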
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'[{imports}]'
        keys = [part.strip().replace('''"''', '''''') for part in imports.split(''',''')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'\"{k}\"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('''\n''')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('''"''', '''''') for part in lines[1].split(''',''')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([f'\"{k}\"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init."""
    with open(file, '''r''') as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='''_import_structure = {''', end_prompt='''if TYPE_CHECKING:''')

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'Overwriting {file}.')
            with open(file, '''w''') as f:
                f.write('''\n'''.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports in every `__init__.py` under `PATH_TO_DIFFUSERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '''__init__.py'''), check_only=check_only)
            if result:
                failures = [os.path.join(root, '''__init__.py''')]
    if len(failures) > 0:
        raise ValueError(f'Would overwrite {len(failures)} files, run `make style`.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only) | 320 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F"""Unused weights: {unused_weights}""")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
return vocab_dict
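# The four special tokens are pinned to ids 0-3; every entry of the fairseq dict file is then
# appended in its original order starting at id 4.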
@torch.no_grad()
def convert_wav2vec2_checkpoint(checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers, ):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True)

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True, )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
    parser.add_argument('--vocab_size', default=10224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 27 | 0 |
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data
# Initialize hash values
        self.hashes = [
0X6A09_E667,
0XBB67_AE85,
0X3C6E_F372,
0XA54F_F53A,
0X510E_527F,
0X9B05_688C,
0X1F83_D9AB,
0X5BE0_CD19,
]
# Initialize round constants
        self.round_constants = [
0X428A_2F98,
0X7137_4491,
0XB5C0_FBCF,
0XE9B5_DBA5,
0X3956_C25B,
0X59F1_11F1,
0X923F_82A4,
0XAB1C_5ED5,
0XD807_AA98,
0X1283_5B01,
0X2431_85BE,
0X550C_7DC3,
0X72BE_5D74,
0X80DE_B1FE,
0X9BDC_06A7,
0XC19B_F174,
0XE49B_69C1,
0XEFBE_4786,
0X0FC1_9DC6,
0X240C_A1CC,
0X2DE9_2C6F,
0X4A74_84AA,
0X5CB0_A9DC,
0X76F9_88DA,
0X983E_5152,
0XA831_C66D,
0XB003_27C8,
0XBF59_7FC7,
0XC6E0_0BF3,
0XD5A7_9147,
0X06CA_6351,
0X1429_2967,
0X27B7_0A85,
0X2E1B_2138,
0X4D2C_6DFC,
0X5338_0D13,
0X650A_7354,
0X766A_0ABB,
0X81C2_C92E,
0X9272_2C85,
0XA2BF_E8A1,
0XA81A_664B,
0XC24B_8B70,
0XC76C_51A3,
0XD192_E819,
0XD699_0624,
0XF40E_3585,
0X106A_A070,
0X19A4_C116,
0X1E37_6C08,
0X2748_774C,
0X34B0_BCB5,
0X391C_0CB3,
0X4ED8_AA4A,
0X5B9C_CA4F,
0X682E_6FF3,
0X748F_82EE,
0X78A5_636F,
0X84C8_7814,
0X8CC7_0208,
0X90BE_FFFA,
0XA450_6CEB,
0XBEF9_A3F7,
0XC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad the data to a multiple of 64 bytes and append its bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        """Run the SHA-256 compression function over all 64-byte blocks."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 zeroed integers for the extended message schedule
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zeroed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit value by the given number of bits."""
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Check that this implementation matches hashlib's SHA-256."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a string or the contents of a file from the command line."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
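# Quick sanity check against the standard FIPS 180-2 "abc" test vector:
#     >>> SHA256(b"abc").hash
#     'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'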
| 227 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print safely from multiple processes by serializing on a file lock."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
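# Illustrative output on a healthy single node with 2 GPUs (hostname and version
# numbers will differ; the line format follows the printflock calls above):
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)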
| 27 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
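# Example invocation (the checkpoint path is an illustrative placeholder):
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt ./swiftformer_xs.pth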
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 212 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure["modeling_data2vec_text"] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure["modeling_data2vec_vision"] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
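# Note: with the module registered lazily in sys.modules, an import such as
#     from transformers.models.data2vec import Data2VecTextModel
# only pulls in the heavy torch-backed submodule on first attribute access.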
| 27 | 0 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = (
    "\nclass {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n\n"
    "    @classmethod\n"
    "    def from_config(cls, *args, **kwargs):\n"
    "        requires_backends(cls, {1})\n\n"
    "    @classmethod\n"
    "    def from_pretrained(cls, *args, **kwargs):\n"
    "        requires_backends(cls, {1})\n"
)

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for a dummy object, keyed on the name's casing convention."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; with `overwrite=True`, regenerate them."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
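# Typical invocations from the repository root (per the header comment above):
#     python utils/check_dummies.py                      # fail loudly if dummy files are stale
#     python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files in place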
| 188 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise fetch a real model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
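# To run these tests locally (assuming the usual datasets repo layout with a ./metrics/ folder):
#     python -m pytest -sv tests/test_metric_common.py -k "exact_match"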
| 27 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
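# Minimal usage sketch (requires Hub access; the checkpoint name is taken from the map above):
#     tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     tokenizer("Hello world!")["input_ids"]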
| 285 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 27 | 0 |
"""simple docstring"""
def __magic_name__ ( __snake_case : str ) -> Dict:
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
lowercase : int = ''
while len(_SCREAMING_SNAKE_CASE ) % 3 != 0:
lowercase : str = '0' + bin_string
lowercase : List[str] = [
bin_string[index : index + 3]
for index in range(len(_SCREAMING_SNAKE_CASE ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
lowercase : int = 0
for index, val in enumerate(_SCREAMING_SNAKE_CASE ):
oct_val += int(2 ** (2 - index) * int(_SCREAMING_SNAKE_CASE ) )
oct_string += str(_SCREAMING_SNAKE_CASE )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 202 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
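# Usage sketch via torch.hub (the entry points are defined above; the model id is illustrative):
#     import torch
#     tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")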
| 27 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 121 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
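# To run this suite directly (the path is illustrative; adjust to the repo layout):
#     python -m pytest -sv tests/test_metrics.py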
| 27 | 0 |