from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
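
# --- Illustration (added; not part of the transformers module) ----------------
# A minimal, self-contained sketch of the lazy-import pattern used above: the
# module object in sys.modules is swapped for a proxy whose __getattr__ performs
# the real import on first attribute access, keeping the top-level import cheap.
# The name TinyLazyModule is invented for this sketch.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: f"{name}.{submodule}" for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        try:
            submodule = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        value = getattr(importlib.import_module(submodule), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value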
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
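
# --- Illustration (added; toy data, not the ELI5 index) ------------------------
# find_nearest_training above relies on faiss max-inner-product search; this inert
# helper shows the three calls involved (build, add, search) on random vectors.
def _faiss_mips_demo():
    d = 128
    corpus = np.random.rand(1000, d).astype("float32")
    demo_index = faiss.IndexFlatIP(d)  # exact inner-product index, as above
    demo_index.add(corpus)
    queries = np.random.rand(1, d).astype("float32")
    scores, ids = demo_index.search(queries, 10)  # top-10 rows by dot product
    return scores, ids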
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # NB: support_list is read from module scope (set in the main block below)
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
  <head>
    <style>
      .img-container {
        padding-left: 90px;
        padding-right: 90px;
        padding-top: 50px;
        padding-bottom: 50px;
        background-color: #f0f3f9;
      }
    </style>
  </head>
  <body>
    <span class="img-container"> <!-- Inline parent element -->
      %s
    </span>
  </body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
    ### Information retriever options

    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
    """
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
    ### Answer generation options

    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
    **beam** search, or **sample** from the decoder's output probabilities.
    """
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>",
                    unsafe_allow_html=True,
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))


disclaimer = """
---

**Disclaimer**

*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import json
import os
import unittest

from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Runs the same tests with `ftfy` and `spacy` installed."""

    pass
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
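
# Note (added): instantiating this class behaves exactly like OwlViTImageProcessor,
# except that it first emits the FutureWarning above, e.g.:
#   feature_extractor = OwlViTFeatureExtractor()  # FutureWarning: ...use OwlViTImageProcessor instead.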
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
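
# Worked example (added) for make_linear_from_emb above: the weight matrix of an
# nn.Embedding(10, 4) has shape (10, 4) and is installed directly as the weight of a
# bias-free Linear, so F.linear maps 4-dim hidden states to 10 vocabulary logits
# while sharing the embedding's storage.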
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
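
# Worked example (added; hypothetical dict file contents) for create_vocab_dict:
# a fairseq `dict.txt` lists one "token count" pair per line, and the four special
# tokens are prepended at ids 0-3, so
#
#     hello 120
#     world 87
#
# produces {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}.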
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
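
# Worked example (added) for rename_state_dict_key above, which is pure string rewriting:
#   "encoder.layers.0.attention.q_lin.weight"
#   -> after PATTERNS:             "encoder.layers.0.attn.q_proj.weight"
#   -> after the encoder fixups:   "encoder.layers.0.self_attn.q_proj.weight"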
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak the ParlAI checkpoint's weights into our Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file: str):
    """Loads a vocabulary file into an ordered token -> id dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedy longest-match-first: shrink the window until a vocab entry is found.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer (jieba word segmentation + wordpiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the literal space and newline characters onto the special-token ids.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then wordpiece-tokenize each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and bos/eos ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the special-token spellings before writing the file back out.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
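
# --- Illustration (added; hypothetical vocab, the real one comes from vocab.txt) ----
# An inert helper showing the greedy longest-match-first behavior of
# WordpieceTokenizer.tokenize above:
def _wordpiece_demo():
    toy = WordpieceTokenizer(vocab={"play", "ing"}, unk_token="<unk>")
    assert toy.tokenize("playing") == ["play", "ing"]  # longest matching prefix first
    assert toy.tokenize("xplay") == ["<unk>", "play"]  # unknown chars fall back one by one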
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
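
# --- Illustration (added) -------------------------------------------------------
# How the "init" pattern above rewrites a version line; inert helper on a toy string:
def _demo_init_pattern():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    # '__version__ = "4.26.0.dev0"' becomes '__version__ = "4.27.0"' (the version
    # string "4.27.0" here is only an example).
    return re_pattern.sub(replace.replace("VERSION", "4.27.0"), '__version__ = "4.26.0.dev0"\n')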
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law and solve for whichever one of the four quantities
    (force, charge1, charge2, distance) is passed in as 0.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
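
# Examples (added): pass 0 for exactly the quantity you want solved.
#   coulombs_law(force=0, charge1=1, charge2=1, distance=1)
#       -> {'force': 8988000000.0}
#   coulombs_law(force=8.988e9, charge1=0, charge2=1, distance=1)
#       -> {'charge1': 1.0}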
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
"""simple docstring"""
_lowerCAmelCase : Any = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : int = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
_lowerCamelCase : Stack[int] = Stack()
_lowerCamelCase : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowerCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowerCamelCase )
elif i == ")":
# RULE 4
_lowerCamelCase : List[Any] = operator_stack.peek()
operator_stack.pop()
_lowerCamelCase : Union[str, Any] = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase : Tuple = operand_stack.peek()
operand_stack.pop()
_lowerCamelCase : List[str] = operators[opr](_lowerCamelCase , _lowerCamelCase )
operand_stack.push(_lowerCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''') | 46 |
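    # A second check (added): operands must be single digits, since the parser
    # consumes one character at a time.
    # Worked trace: (9 - 3) collapses to 6 on the first ")", then 6 / 2 on the second.
    print(f"((9 - 3) / 2) = {dijkstras_two_stack_algorithm('((9 - 3) / 2)')}")  # 3.0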
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
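    # Minimal usage sketch (assumptions: an accelerate install and a `model` like the
    # one exercised above). CPU offload via hooks keeps the parameters on the meta
    # device and streams them in from the weights map only for each forward pass:
    #
    #   attach_align_device_hook(model, execution_device=0, offload=True,
    #                            weights_map=model.state_dict())
    #   output = model(torch.randn(2, 3))   # weights streamed in, runs on device 0
    #   remove_hook_from_submodules(model)  # reloads the weights onto the CPU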
| 26 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError('math domain error')
    # Gamma(num) = integral of x^(num - 1) * e^(-x) dx from 0 to infinity
    return quad(integrand, 0, inf, args=(num,))[0]
def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
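# Example (illustrative): the gamma function extends the factorial, so
# gamma(n) == (n - 1)! for positive integers n.
#
#   >>> round(gamma(5))  # 4! = 24
#   24
#   >>> round(gamma(6))  # 5! = 120
#   120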
| 47 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """Search a path from init to goal on grid and return (path, action map)."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # estimated cost from the starting cell to the goal
    cell = [[f, g, x, y]]
    found = False  # flag that is set when the search is complete
    resign = False  # flag set if we cannot expand any further
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly cell so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out the different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
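    # Illustrative note: with the grid above, `init` is the top-left cell [0, 0] and
    # `goal` is the bottom-right cell [4, 5]; the printed path skirts the obstacle
    # column of 1's before reaching the goal. Each non-zero entry of the action map
    # is the index into DIRECTIONS that was used to step into that cell.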
| 26 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class A(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Union[str, Any] , __magic_name__ : bool = True , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 255 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = size if size is not None else {"shortest_edge": 256}
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCAmelCase__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCAmelCase__ = get_size_dict(__magic_name__ , param_name="crop_size" )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase__ = get_resize_output_image_size(__magic_name__ , size=size["shortest_edge"] , default_to_square=__magic_name__ )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__magic_name__ , size=(size["height"], size["width"]) , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Tuple , ):
"""simple docstring"""
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : ImageInput , __magic_name__ : Optional[bool] = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[float] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(__magic_name__ , param_name="crop_size" )
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
lowerCAmelCase__ = {"pixel_values": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : List[Tuple] = None ):
"""simple docstring"""
lowerCAmelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(__magic_name__ ):
lowerCAmelCase__ = target_sizes.numpy()
lowerCAmelCase__ = []
for idx in range(len(__magic_name__ ) ):
lowerCAmelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__magic_name__ )
lowerCAmelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__magic_name__ )
else:
lowerCAmelCase__ = logits.argmax(dim=1 )
lowerCAmelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
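# Minimal usage sketch (assumptions: `A` is the image-processor class above and
# `image` is a PIL image; the keyword names mirror the constructor defaults):
#
#   processor = A(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})
#   batch = processor(images=image, return_tensors="pt")
#   pixel_values = batch["pixel_values"]  # resized, cropped, rescaled, normalized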
| 48 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""only integers accepted as input""" )
else:
__snake_case : List[Any] = str(abs(_lowerCamelCase ) )
__snake_case : Union[str, Any] = [list(_lowerCamelCase ) for char in range(len(_lowerCamelCase ) )]
for index in range(len(_lowerCamelCase ) ):
num_transpositions[index].pop(_lowerCamelCase )
return max(
int("""""".join(list(_lowerCamelCase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 0 |
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Return the minimum path sum from the top-left to the bottom-right of `grid`."""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
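# Example (illustrative): for the classic 3x3 grid the cheapest route is
# 1 -> 3 -> 1 -> 1 -> 1, giving a minimum path sum of 7.
#
#   >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#   7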
| 49 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move at `node_index`, `depth` levels down."""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if not scores:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f'''Optimal value : {minimax(0, 0, True, scores, height)}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
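    # Worked example (illustrative): for scores [3, 5, 2, 9] the tree height is
    # log2(4) = 2, and the maximizing root picks the better of two minimizing
    # subtrees: max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
    #
    #   >>> minimax(0, 0, True, [3, 5, 2, 9], 2)
    #   3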
| 26 | 0 |
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # accumulating a gradient list of the wrong length raises
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("""CPU""" )
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type="""CPU""" )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
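    # Minimal usage sketch (illustrative, outside any distribution strategy): the
    # accumulator sums per-step gradients until the optimizer applies them once.
    #
    #   accumulator = GradientAccumulator()
    #   for grads in micro_batch_gradients:  # one gradient list per micro-batch
    #       accumulator(grads)
    #   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
    #   accumulator.reset()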
| 50 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place with the (deliberately slow) slowsort."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
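# Example (illustrative): slowsort sorts the sequence in place.
#
#   >>> seq = [5, 2, 4, 1]
#   >>> slowsort(seq)
#   >>> seq
#   [1, 2, 4, 5]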
| 26 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command(args):
    """simple docstring"""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
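# Typical invocation (illustrative): this module backs the `accelerate test`
# subcommand, which launches the bundled end-to-end sanity-check script, e.g.
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml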
| 51 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """Run doctests on the files of `directory`, filtered by the given identifiers."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file)
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(Path("""..""" ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_doc(self) -> None:
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_doc(self) -> None:
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_doc(self) -> None:
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def test_remaining_doc_files(self) -> None:
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def test_doc_sources(self) -> None:
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 26 | 0 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    '''Unit tests for the greedy knapsack implementation.'''
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError , '''Weight can not be negative.''' )
    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError , '''Profit can not be negative.''' )
    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError , '''max_weight must greater than zero.''' )
    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError , '''The length of profit and weight must be same.''' )
if __name__ == "__main__":
    unittest.main()
 | 52 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
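# Usage sketch (illustrative; the pipeline id and checkpoints below are assumptions,
# and the Whisper components must be supplied explicitly when loading this file as a
# diffusers community pipeline):
#
#   speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#   speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline name
#       speech_model=speech_model,
#       speech_processor=speech_processor,
#   )
#   image = pipe(raw_audio).images[0]  # `raw_audio`: 16 kHz mono waveform (numpy array)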
| 26 | 0 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x_num/x_den + y_num/y_den + z_num/z_den reduced to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
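# Example (illustrative): 1/2 + 1/3 + 1/6 = 1, which add_three reduces to lowest terms.
#
#   >>> add_three(1, 2, 1, 3, 1, 6)
#   (1, 1)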
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
| 53 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 26 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    '''Greedy fractional knapsack: maximize profit within the given weight capacity.'''
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same." )
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero." )
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative." )
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative." )
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight does not reach the max limit, e.g. 15 kg, and i < length
    while limit <= max_weight and i < length:
        # pick the largest remaining profit/weight ratio in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight:
            # 1 === weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take the
            # required fraction of the remaining capacity and calculate profit for it:
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("""Input profits separated by spaces: """).split()]
    weight = [int(x) for x in input("""Input weights separated by spaces: """).split()]
    max_weight = int(input("""Max weight allowed: """))
    # Function Call
    calc_profit(profit, weight, max_weight)
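    # Example (illustrative), consistent with the unit test elsewhere in this file:
    #
    #   >>> calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100)
    #   210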
| 54 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = """background"""
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    # fetch a COCO validation image to sanity-check the converted model
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilenet_v1_config(model_name )
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_mobilenet_v1_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
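    # Typical invocation (illustrative; the script filename is an assumption):
    #
    #   python convert_mobilenet_v1_checkpoint.py \
    #       --model_name mobilenet_v1_1.0_224 \
    #       --checkpoint_path /path/to/mobilenet_v1_1.0_224.ckpt \
    #       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224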
| 26 | 0 |
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"} )
    closed_brackets = set({")", "]", "}"} )
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
def main() -> None:
    s = input("Enter sequence of brackets: " )
    if is_balanced(s ):
        print(s , "is balanced" )
    else:
        print(s , "is not balanced" )
if __name__ == "__main__":
main()
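    # Examples (illustrative):
    #
    #   >>> is_balanced("{[()]}")
    #   True
    #   >>> is_balanced("([)]")
    #   False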
| 55 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        """Computes recall via sklearn; see the module docstring for the arguments."""
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 26 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __lowercase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = OpenAIGPTTokenizer
_SCREAMING_SNAKE_CASE : int = OpenAIGPTTokenizerFast
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : Tuple = False
def a ( self : List[str] ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(SCREAMING_SNAKE_CASE_ ) )
def a ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]:
return "lower newer", "lower newer"
def a ( self : List[str] ) -> Tuple:
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
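    # How the toy vocab/merges from setUp tokenize "lower" (BPE, step by step;
    # an illustrative walkthrough, not an additional test):
    #   "lower" -> l o w e r</w>
    #   merge "l o"     -> lo w e r</w>
    #   merge "lo w"    -> low e r</w>
    #   merge "e r</w>" -> low er</w>
    # yielding ["low", "er</w>"], as asserted above.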
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str=15 ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__snake_case = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Simple input
__snake_case = 'This is a simple input'
__snake_case = ['This is a simple input 1', 'This is a simple input 2']
__snake_case = ('This is a simple input', 'This is a pair')
__snake_case = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='max_length' , )
def a ( self : Union[str, Any] ) -> Any:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class _lowercase ( __lowercase ):
pass
| 56 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        """Computes the Matthews correlation coefficient via sklearn."""
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
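# A minimal sketch of the binary MCC definition the description refers to
# (the phi coefficient); the metric itself simply delegates to sklearn above.
# The helper name is illustrative.
import math


def _mcc_binary(references, predictions):
    tp = sum(r == 1 and p == 1 for r, p in zip(references, predictions))
    tn = sum(r == 0 and p == 0 for r, p in zip(references, predictions))
    fp = sum(r == 0 and p == 1 for r, p in zip(references, predictions))
    fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))
    denominator = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0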
| 26 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Any = mock.Mock()
UpperCamelCase_: Optional[int] = 5_0_0
UpperCamelCase_: Optional[int] = {}
UpperCamelCase_: List[str] = HTTPError
UpperCamelCase_: str = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: Dict = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Tuple = mock.Mock()
UpperCamelCase_: Union[str, Any] = 5_0_0
UpperCamelCase_: Optional[int] = {}
UpperCamelCase_: str = HTTPError
UpperCamelCase_: str = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: Dict = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: Optional[int] = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase_: Optional[int] = tempfile.mktemp()
with open(_lowerCamelCase , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , _lowerCamelCase )
UpperCamelCase_: Tuple = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , _lowerCamelCase )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase_: str = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : Dict =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _a ( cls ):
UpperCamelCase_: Optional[int] = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Union[str, Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Union[str, Any] = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Union[str, Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id='test-tokenizer' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: Optional[Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: int = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Dict = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase_: Optional[int] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: Optional[int] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _a ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Tuple = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Optional[int] = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: str = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: str = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
UpperCamelCase_: Any = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Dict = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase_: str = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: str = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _a ( self ):
UpperCamelCase_: Optional[int] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def _a ( self ):
UpperCamelCase_: int = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: Tuple = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: List[Any] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def _a ( self ):
UpperCamelCase_: Dict = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def _a ( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase_: Tuple = Trie()
UpperCamelCase_: Optional[int] = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(_lowerCamelCase , ['AB', 'C'] )
| 57 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
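# Minimal usage sketch of the helpers exercised above (the paths are
# placeholders, not fixtures from this test file):
#   dl_manager = DownloadManager()
#   for path, file_obj in dl_manager.iter_archive("archive.tar"):
#       ...                      # (path, file-like) pairs, as in the tests
#   for file_path in dl_manager.iter_files(["data_dir"]):
#       ...                      # plain file paths, one per file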
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    """simple docstring"""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Inserts `text`, marking the end of the word with the END sentinel."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Returns all stored suffixes that complete `prefix`."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """simple docstring"""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
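# Expected output of main() with the words inserted above (dict order is
# insertion order, and the END sentinel contributes the trailing space):
#   ('depart ', 'detergent ', 'deer ', 'deal ')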
| 58 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of
    the squares of the first `n` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Any:
'''simple docstring'''
lowerCamelCase__: Dict =self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "hidden_sizes"))
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "num_attention_heads"))
self.parent.assertTrue(hasattr(UpperCAmelCase_ , "num_encoder_blocks"))
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=13 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Dict=[2, 2, 2, 2] , UpperCAmelCase_ : Dict=[8, 4, 2, 1] , UpperCAmelCase_ : Optional[Any]=[16, 32, 64, 128] , UpperCAmelCase_ : str=[1, 4, 8, 16] , UpperCAmelCase_ : Optional[int]=[1, 2, 4, 8] , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : List[Any]=None , ) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =parent
lowerCamelCase__: int =batch_size
lowerCamelCase__: Tuple =image_size
lowerCamelCase__: List[Any] =num_channels
lowerCamelCase__: Dict =num_encoder_blocks
lowerCamelCase__: Dict =sr_ratios
lowerCamelCase__: Union[str, Any] =depths
lowerCamelCase__: Tuple =hidden_sizes
lowerCamelCase__: Any =downsampling_rates
lowerCamelCase__: int =num_attention_heads
lowerCamelCase__: Tuple =is_training
lowerCamelCase__: Any =use_labels
lowerCamelCase__: str =hidden_act
lowerCamelCase__: Tuple =hidden_dropout_prob
lowerCamelCase__: Union[str, Any] =attention_probs_dropout_prob
lowerCamelCase__: int =initializer_range
lowerCamelCase__: int =num_labels
lowerCamelCase__: List[Any] =scope
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Union[str, Any] =None
if self.use_labels:
lowerCamelCase__: List[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
lowerCamelCase__: List[str] =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =SegformerModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_)
lowerCamelCase__: List[str] =self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Tuple =self.num_labels
lowerCamelCase__: Any =SegformerForSemanticSegmentation(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Any =model(UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =1
lowerCamelCase__: Union[str, Any] =SegformerForSemanticSegmentation(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: str =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(UpperCAmelCase_)
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertGreater(result.loss , 0.0)
def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =config_and_inputs
lowerCamelCase__: List[str] ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =SegformerModelTester(self)
lowerCamelCase__: str =SegformerConfigTester(self , config_class=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
lowerCamelCase__: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCAmelCase_)
@unittest.skip("SegFormer does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : str) ->Tuple:
'''simple docstring'''
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Union[str, Any] =model_class(UpperCAmelCase_)
lowerCamelCase__: Dict =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: int =[*signature.parameters.keys()]
lowerCamelCase__: str =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Tuple:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__: List[Any] =True
for model_class in self.all_model_classes:
lowerCamelCase__: Optional[int] =True
lowerCamelCase__: Dict =False
lowerCamelCase__: Any =True
lowerCamelCase__: str =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Optional[int] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Tuple =outputs.attentions
lowerCamelCase__: List[Any] =sum(self.model_tester.depths)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__: Optional[Any] =True
lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Optional[Any] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: List[Any] =outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# verify the first attentions (first block, first layer)
lowerCamelCase__: Dict =(self.model_tester.image_size // 4) ** 2
lowerCamelCase__: int =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCamelCase__: Any =(self.model_tester.image_size // 32) ** 2
lowerCamelCase__: str =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCamelCase__: int =len(UpperCAmelCase_)
# Check attention is always last and order is fine
lowerCamelCase__: Union[str, Any] =True
lowerCamelCase__: List[Any] =True
lowerCamelCase__: Dict =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Union[str, Any] =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
self.assertEqual(out_len + 1 , len(UpperCAmelCase_))
lowerCamelCase__: Any =outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# verify the first attentions (first block, first layer)
lowerCamelCase__: Union[str, Any] =(self.model_tester.image_size // 4) ** 2
lowerCamelCase__: Any =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Dict:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any]):
lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
lowerCamelCase__: str =model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
lowerCamelCase__: Any =outputs.hidden_states
lowerCamelCase__: Any =self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCamelCase__ , lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Tuple =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__: Any =True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowerCamelCase__ , lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__: Tuple =True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCAmelCase_):
continue
lowerCamelCase__: List[str] =model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
lowerCamelCase__: Tuple =self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =model(**UpperCAmelCase_).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def SCREAMING_SNAKE_CASE_ (self : str) ->Tuple:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Union[str, Any] =SegformerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase__: Any =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_)
lowerCamelCase__: Tuple =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
UpperCAmelCase_)
lowerCamelCase__: Tuple =prepare_img()
lowerCamelCase__: Union[str, Any] =image_processor(images=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: List[str] =encoded_inputs.pixel_values.to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Dict =model(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_)
lowerCamelCase__: str =SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024").to(UpperCAmelCase_)
lowerCamelCase__: Tuple =prepare_img()
lowerCamelCase__: Optional[Any] =image_processor(images=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: Tuple =encoded_inputs.pixel_values.to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: List[Any] =model(UpperCAmelCase_)
lowerCamelCase__: Any =torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-1))
@slow
def SCREAMING_SNAKE_CASE_ (self : str) ->Dict:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase_ , align=UpperCAmelCase_ , do_random_crop=UpperCAmelCase_)
lowerCamelCase__: str =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
UpperCAmelCase_)
lowerCamelCase__: int =prepare_img()
lowerCamelCase__: Tuple =image_processor(images=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: Any =encoded_inputs.pixel_values.to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_)
lowerCamelCase__: Dict =outputs.logits.detach().cpu()
lowerCamelCase__: Any =image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ , target_sizes=[(500, 300)])
lowerCamelCase__: List[str] =torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , UpperCAmelCase_)
lowerCamelCase__: Tuple =image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_)
lowerCamelCase__: Dict =torch.Size((128, 128))
self.assertEqual(segmentation[0].shape , UpperCAmelCase_)
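# Shape arithmetic behind the attention checks above, spelled out for this
# toy config (image_size=64, sr_ratios=[8, 4, 2, 1]):
#   first block: seq_len = (64 // 4) ** 2 = 256; reduced = (64 // (4 * 8)) ** 2 = 4
#   last block:  seq_len = (64 // 32) ** 2 = 4;  reduced = (64 // (32 * 1)) ** 2 = 4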
| 59 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"

        # Find the widest element so columns line up.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1), here the identity matrix
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
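# The sherman_morrison method implements the Sherman-Morrison identity with
# `self` playing the role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# With ainv = I as in test1(), this reduces to I - (u v^T) / (1 + v^T u).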
| 26 | 0 |
import math
def is_prime(number: int) -> bool:
    """Checks primality by 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Returns the `nth` prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''') from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
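# Known checkpoints for quick verification: the 6th prime is 13, and the
# 10_001st prime (Project Euler #7) is 104_743.
assert solution(6) == 13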
| 60 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
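# Illustrative sketch (not part of the original script): the index built above maps
# every parameter name to the shard file that stores it. This hypothetical helper
# rebuilds a minimal index to show the expected JSON layout.
def _demo_build_weight_index():
    import json

    shards = {
        "pytorch_model-00001-of-00002.bin": ["ffn.experts.expert_0.fc1.weight"],
        "pytorch_model-00002-of-00002.bin": ["decoder.embed_tokens.weight"],
    }
    weight_map = {key: shard for shard, keys in shards.items() for key in keys}
    return json.dumps({"metadata": {"total_size": 0}, "weight_map": weight_map}, indent=2)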
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __lowerCamelCase :
"""simple docstring"""
snake_case__ = 42
snake_case__ = None
snake_case__ = None
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = Node(1 )
lowerCAmelCase__ = Node(2 )
lowerCAmelCase__ = Node(3 )
lowerCAmelCase__ = Node(4 )
lowerCAmelCase__ = Node(5 )
return tree
def _A ( lowerCAmelCase_ : Node | None ):
"""simple docstring"""
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def _A ( lowerCAmelCase_ : Node | None ):
"""simple docstring"""
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def _A ( lowerCAmelCase_ : Node | None ):
"""simple docstring"""
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def _A ( lowerCAmelCase_ : Node | None ):
"""simple docstring"""
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def _A ( lowerCAmelCase_ : Node | None ):
"""simple docstring"""
lowerCAmelCase__ = []
if root is None:
return output
lowerCAmelCase__ = deque([root] )
while process_queue:
lowerCAmelCase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def _A ( lowerCAmelCase_ : Node | None , lowerCAmelCase_ : int ):
"""simple docstring"""
lowerCAmelCase__ = []
def populate_output(lowerCAmelCase_ : Node | None , lowerCAmelCase_ : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCAmelCase_ , lowerCAmelCase_ )
return output
def _A ( lowerCAmelCase_ : Node | None , lowerCAmelCase_ : int ):
"""simple docstring"""
lowerCAmelCase__ = []
def populate_output(lowerCAmelCase_ : Node | None , lowerCAmelCase_ : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCAmelCase_ , lowerCAmelCase_ )
return output
def _A ( lowerCAmelCase_ : Node | None ):
"""simple docstring"""
if root is None:
return []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
lowerCAmelCase__ = height(lowerCAmelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCAmelCase_ , lowerCAmelCase_ ) )
lowerCAmelCase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCAmelCase_ , lowerCAmelCase_ ) )
lowerCAmelCase__ = 0
return output
def _A ( ): # Main function for testing.
"""simple docstring"""
lowerCAmelCase__ = make_tree()
print(F'In-order Traversal: {inorder(lowerCAmelCase_ )}' )
print(F'Pre-order Traversal: {preorder(lowerCAmelCase_ )}' )
print(F'Post-order Traversal: {postorder(lowerCAmelCase_ )}' , "\n" )
print(F'Height of Tree: {height(lowerCAmelCase_ )}' , "\n" )
print("Complete Level Order Traversal: " )
print(level_order(lowerCAmelCase_ ) , "\n" )
print("Level-wise order Traversal: " )
for level in range(1 , height(lowerCAmelCase_ ) + 1 ):
print(F'Level {level}:' , get_nodes_from_left_to_right(lowerCAmelCase_ , level=lowerCAmelCase_ ) )
print("\nZigZag order Traversal: " )
print(zigzag(lowerCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
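# Illustrative sketch (added for comparison, not part of the original module): the
# recursive traversals above can exhaust the call stack on very deep trees; an
# explicit stack makes in-order traversal iterative. The _N node class is local.
def _demo_iterative_inorder():
    class _N:
        def __init__(self, data, left=None, right=None):
            self.data, self.left, self.right = data, left, right

    node, stack, out = _N(2, _N(1), _N(3)), [], []
    while stack or node:
        while node:  # walk to the leftmost unvisited node, remembering the path
            stack.append(node)
            node = node.left
        node = stack.pop()
        out.append(node.data)
        node = node.right
    return out  # [1, 2, 3]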
| 61 |
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
        __snake_case : List[Any] = self.k  # use the k validated in __init__ instead of hardcoding 0.04
__snake_case : Tuple = self.window_size // 2
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
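# Illustrative cross-check (not part of the original file): OpenCV ships a vectorized
# Harris detector, cv2.cornerHarris, which computes the same response map without the
# explicit double loop above; the synthetic image below stands in for a real one.
def _demo_builtin_harris():
    import cv2
    import numpy as np

    gray = np.float32(np.random.rand(64, 64))
    response = cv2.cornerHarris(gray, 3, 3, 0.04)  # blockSize=3, ksize=3, k=0.04
    return response > 0.01 * response.max()  # boolean corner mask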
| 26 | 0 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = '''xlm-prophetnet'''
UpperCamelCase_ : Tuple = ['''past_key_values''']
UpperCamelCase_ : int = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : Dict , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[Union[str, Callable]] = "gelu" , UpperCAmelCase_ : Optional[int] = 3_0522 , UpperCAmelCase_ : Optional[int] = 1024 , UpperCAmelCase_ : Optional[int] = 4096 , UpperCAmelCase_ : Optional[int] = 12 , UpperCAmelCase_ : Optional[int] = 16 , UpperCAmelCase_ : Optional[int] = 4096 , UpperCAmelCase_ : Optional[int] = 12 , UpperCAmelCase_ : Optional[int] = 16 , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[float] = 0.1 , UpperCAmelCase_ : Optional[int] = 512 , UpperCAmelCase_ : Optional[float] = 0.02 , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[int] = 0 , UpperCAmelCase_ : Optional[int] = 2 , UpperCAmelCase_ : Optional[int] = 32 , UpperCAmelCase_ : Optional[int] = 128 , UpperCAmelCase_ : Optional[bool] = False , UpperCAmelCase_ : Optional[float] = 0.0 , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[int] = 0 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : Optional[int] = 2 , **UpperCAmelCase_ : Dict , ):
SCREAMING_SNAKE_CASE : Any = vocab_size
SCREAMING_SNAKE_CASE : str = hidden_size
SCREAMING_SNAKE_CASE : Tuple = encoder_ffn_dim
SCREAMING_SNAKE_CASE : List[Any] = num_encoder_layers
SCREAMING_SNAKE_CASE : Any = num_encoder_attention_heads
SCREAMING_SNAKE_CASE : Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE : List[str] = num_decoder_layers
SCREAMING_SNAKE_CASE : List[Any] = num_decoder_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = init_std # Normal(0, this parameter)
SCREAMING_SNAKE_CASE : Union[str, Any] = activation_function
# parameters for xlmprophetnet
SCREAMING_SNAKE_CASE : Dict = ngram
SCREAMING_SNAKE_CASE : Any = num_buckets
SCREAMING_SNAKE_CASE : str = relative_max_distance
SCREAMING_SNAKE_CASE : str = disable_ngram_loss
SCREAMING_SNAKE_CASE : Dict = eps
# 3 Types of Dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_dropout
SCREAMING_SNAKE_CASE : Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE : List[Any] = dropout
SCREAMING_SNAKE_CASE : int = use_cache
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , add_cross_attention=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
@property
def _A ( self : int ):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _A ( self : str , UpperCAmelCase_ : Optional[Any] ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 62 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
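# Illustrative usage sketch (not part of the original module): the concrete class in
# transformers is CLIPProcessor; the checkpoint name below is only an example and the
# call requires network access to download it.
def _demo_processor_usage():
    import numpy as np
    from PIL import Image
    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text=["a photo"], images=image, return_tensors="pt")
    return inputs["input_ids"].shape, inputs["pixel_values"].shape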
| 26 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
a : Tuple = CLIPConfig
a : Optional[Any] = ['CLIPEncoderLayer']
def __init__( self : List[str] , __lowercase : CLIPConfig ) -> List[Any]:
super().__init__(__lowercase )
__UpperCAmelCase : Any = CLIPVisionModelWithProjection(config.vision_config )
__UpperCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 )
__UpperCAmelCase : List[Any] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def UpperCAmelCase ( self : Any , __lowercase : List[str] , __lowercase : Dict , __lowercase : Any=0.5 , __lowercase : Dict=0.5 ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.vision_model(__lowercase )[0]
__UpperCAmelCase : List[str] = self.p_head(__lowercase )
__UpperCAmelCase : Dict = nsfw_detected.flatten()
__UpperCAmelCase : List[Any] = nsfw_detected > p_threshold
__UpperCAmelCase : Dict = nsfw_detected.tolist()
if any(__lowercase ):
logger.warning(
"""Potential NSFW content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, nsfw_detected_ in enumerate(__lowercase ):
if nsfw_detected_:
__UpperCAmelCase : Optional[Any] = np.zeros(images[idx].shape )
__UpperCAmelCase : Optional[int] = self.w_head(__lowercase )
__UpperCAmelCase : Tuple = watermark_detected.flatten()
__UpperCAmelCase : List[Any] = watermark_detected > w_threshold
__UpperCAmelCase : int = watermark_detected.tolist()
if any(__lowercase ):
logger.warning(
"""Potential watermarked content was detected in one or more images. A black image will be returned instead."""
""" Try again with a different prompt and/or seed.""" )
for idx, watermark_detected_ in enumerate(__lowercase ):
if watermark_detected_:
__UpperCAmelCase : Optional[Any] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
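# Illustrative sketch (not part of the original module) of the detect-and-black-out
# pattern implemented above, on dummy data: flatten per-image scores, threshold them,
# then zero out any flagged image.
def _demo_blackout(p_threshold=0.5):
    import numpy as np

    images = np.ones((2, 8, 8, 3))
    scores = np.array([0.9, 0.1])  # pretend per-image NSFW probabilities
    flagged = (scores > p_threshold).tolist()
    for idx, is_flagged in enumerate(flagged):
        if is_flagged:
            images[idx] = np.zeros(images[idx].shape)
    return images, flagged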
| 63 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
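# Illustrative sketch (not part of the original app) of the dense-retrieval pattern
# the index loaders above rely on: an inner-product FAISS index over fixed-size
# embeddings, queried for the top-k nearest passages. All dimensions are arbitrary.
def _demo_faiss_retrieval(k=3):
    import faiss
    import numpy as np

    dim = 128
    passages = np.random.rand(100, dim).astype("float32")
    index = faiss.IndexFlatIP(dim)
    index.add(passages)
    scores, ids = index.search(np.random.rand(1, dim).astype("float32"), k)
    return scores, ids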
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _lowerCamelCase :
@property
def UpperCamelCase_ ( self ) -> Tuple:
return self.get_dummy_input()
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def UpperCamelCase_ ( self , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , ) -> str:
SCREAMING_SNAKE_CASE__: str= 4
SCREAMING_SNAKE_CASE__: List[str]= 32
SCREAMING_SNAKE_CASE__: Any= (32, 32)
SCREAMING_SNAKE_CASE__: int= torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= torch.device(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= (batch_size, num_channels) + sizes
SCREAMING_SNAKE_CASE__: Union[str, Any]= randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= {'''hidden_states''': hidden_states}
if include_temb:
SCREAMING_SNAKE_CASE__: Optional[Any]= 128
SCREAMING_SNAKE_CASE__: Union[str, Any]= randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase , device=lowerCAmelCase )
if include_res_hidden_states_tuple:
SCREAMING_SNAKE_CASE__: List[Any]= torch.manual_seed(1 )
SCREAMING_SNAKE_CASE__: Optional[Any]= (randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase ),)
if include_encoder_hidden_states:
SCREAMING_SNAKE_CASE__: Optional[Any]= floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase )
if include_skip_sample:
SCREAMING_SNAKE_CASE__: int= randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase , device=lowerCAmelCase )
return dummy_input
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: int= {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 128,
}
if self.block_type == "up":
SCREAMING_SNAKE_CASE__: Optional[Any]= 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
SCREAMING_SNAKE_CASE__: List[str]= self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__: Any= self.block_class(**lowerCAmelCase )
unet_block.to(lowerCAmelCase )
unet_block.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__: Dict= unet_block(**lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Tuple= output[0]
self.assertEqual(output.shape , self.output_shape )
SCREAMING_SNAKE_CASE__: List[str]= output[0, -1, -3:, -3:]
SCREAMING_SNAKE_CASE__: Tuple= torch.tensor(lowerCAmelCase ).to(lowerCAmelCase )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__: Tuple= self.block_class(**lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Dict= model(**lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Any= output[0]
SCREAMING_SNAKE_CASE__: List[Any]= torch.device(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= randn_tensor(output.shape , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= torch.nn.functional.mse_loss(lowerCAmelCase , lowerCAmelCase )
loss.backward()
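# Illustrative sketch (not part of the original tests) of the shape-check pattern the
# mixin above implements: run a block forward under no_grad and compare output and
# input shapes. The Conv2d block is a stand-in for a real UNet block.
def _demo_shape_check():
    import torch

    block = torch.nn.Conv2d(32, 32, kernel_size=3, padding=1)
    hidden_states = torch.randn(4, 32, 16, 16)
    with torch.no_grad():
        output = block(hidden_states)
    assert output.shape == (4, 32, 16, 16)
    return output.shape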
| 64 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if gpta_config_file == "":
UpperCAmelCase__ : Dict = GPTaConfig()
else:
UpperCAmelCase__ : Optional[int] = GPTaConfig.from_json_file(__UpperCamelCase )
UpperCAmelCase__ : Union[str, Any] = GPTaModel(__UpperCamelCase )
# Load weights from numpy
load_tf_weights_in_gpta(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Save pytorch-model
UpperCAmelCase__ : Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
UpperCAmelCase__ : Any = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
torch.save(model.state_dict() , __UpperCamelCase )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__UpperCAmelCase = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
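# Illustrative sketch (not part of the original script): a save/load round trip with
# the public GPT-2 classes, mirroring the on-disk layout the converter produces. The
# target directory is a placeholder.
def _demo_roundtrip(tmp_dir="/tmp/gpt2_demo"):
    from transformers import GPT2Config, GPT2Model

    config = GPT2Config(n_layer=2, n_head=2, n_embd=64)
    GPT2Model(config).save_pretrained(tmp_dir)
    reloaded = GPT2Model.from_pretrained(tmp_dir)
    return reloaded.config.n_layer  # 2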
| 65 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
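# Illustrative sketch (not part of the original script) of the PATTERNS-based key
# renaming applied above, run on a made-up one-entry state dict.
def _demo_rename():
    patterns = [["attention", "attn"], ["q_lin", "q_proj"]]
    state_dict = {"encoder.attention.q_lin.weight": 0}
    renamed = {}
    for key, value in state_dict.items():
        for parlai_name, hf_name in patterns:
            key = key.replace(parlai_name, hf_name)
        renamed[key] = value
    return renamed  # {"encoder.attn.q_proj.weight": 0}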
| 26 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowerCAmelCase_ ( __snake_case , __snake_case ):
_UpperCamelCase : List[Any] = "swin"
_UpperCamelCase : Tuple = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _lowerCAmelCase=2_2_4 , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=9_6 , _lowerCAmelCase=[2, 2, 6, 2] , _lowerCAmelCase=[3, 6, 1_2, 2_4] , _lowerCAmelCase=7 , _lowerCAmelCase=4.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=False , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=3_2 , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ):
super().__init__(**_lowerCAmelCase )
_lowercase : Dict = image_size
_lowercase : Optional[int] = patch_size
_lowercase : str = num_channels
_lowercase : List[Any] = embed_dim
_lowercase : Optional[int] = depths
_lowercase : Any = len(_lowerCAmelCase )
_lowercase : Tuple = num_heads
_lowercase : List[Any] = window_size
_lowercase : int = mlp_ratio
_lowercase : Any = qkv_bias
_lowercase : Any = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[str] = drop_path_rate
_lowercase : Dict = hidden_act
_lowercase : Tuple = use_absolute_embeddings
_lowercase : Any = layer_norm_eps
_lowercase : Dict = initializer_range
_lowercase : Dict = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowercase : int = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
_lowercase : str = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase ) + 1 )]
_lowercase , _lowercase : Any = get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names )
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = version.parse("1.11" )
@property
def __a ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __a ( self ):
return 1E-4
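# Illustrative usage sketch (not part of the original module): the public class is
# SwinConfig, and hidden_size is derived from embed_dim and the number of stages
# exactly as computed in __init__ above.
def _demo_swin_config():
    from transformers import SwinConfig

    config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
    assert config.hidden_size == int(96 * 2 ** (4 - 1))  # 768
    return config.hidden_size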
| 66 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
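# Illustrative sketch (not part of the original script) of the substitution that
# update_version_in_file performs, applied to an in-memory string instead of a file.
def _demo_bump_version():
    import re

    code = '__version__ = "4.26.0.dev0"\n'
    pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
    return pattern.sub('__version__ = "4.26.0"', code)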
| 26 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
snake_case = logging.get_logger(__name__)
class A_ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self : str ,*__A : Dict ,**__A : List[Any] ) -> None:
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' ,__A ,)
        super().__init__(*__A ,**__A )
| 67 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self._create_example_records()
__snake_case : str = Dataset.from_list(__magic_name__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__magic_name__ ):
self.assertDictEqual(__magic_name__ , example_records[i] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self._create_example_records()
__snake_case : Dict = Dataset.from_list(__magic_name__ )
__snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : str ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
__snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowercase__ ( self : List[str] ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
__snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__snake_case : int = Dataset.from_list(__magic_name__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = Dataset.from_list([] )
self.assertEqual(len(__magic_name__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
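# Illustrative usage sketch (not part of the original tests): Dataset.from_list infers
# the schema from the first record, which is exactly what the missing-column test
# above exercises.
def _demo_from_list():
    from datasets import Dataset

    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    return dset[0], dset[1]  # ({'col_1': 1}, {'col_1': None})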
| 26 | 0 |
import argparse
import copy
def lowercase__ ( A_: Tuple ) -> Dict:
"""simple docstring"""
__UpperCAmelCase ={}
with open(A_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__UpperCAmelCase =[]
_list.append([line.split()[1], line.split()[2]] )
__UpperCAmelCase =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__UpperCAmelCase =[]
_list.append([line.split()[0], line.split()[2]] )
__UpperCAmelCase =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowercase__ ( A_: str , A_: List[str] ) -> Optional[Any]:
"""simple docstring"""
with open(A_ ) as f:
__UpperCAmelCase =f.read(1 )
__UpperCAmelCase =start_node
__UpperCAmelCase =[]
__UpperCAmelCase =start_node
__UpperCAmelCase =0
while visiting not in first_solution:
__UpperCAmelCase =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(A_ ) and k[0] not in first_solution:
__UpperCAmelCase =k[1]
__UpperCAmelCase =k[0]
first_solution.append(A_ )
__UpperCAmelCase =distance_of_first_solution + int(A_ )
__UpperCAmelCase =best_node
first_solution.append(A_ )
__UpperCAmelCase =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__UpperCAmelCase =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def lowercase__ ( A_: Any , A_: Optional[Any] ) -> str:
"""simple docstring"""
__UpperCAmelCase =[]
for n in solution[1:-1]:
__UpperCAmelCase =solution.index(A_ )
for kn in solution[1:-1]:
__UpperCAmelCase =solution.index(A_ )
if n == kn:
continue
__UpperCAmelCase =copy.deepcopy(A_ )
__UpperCAmelCase =kn
__UpperCAmelCase =n
__UpperCAmelCase =0
for k in _tmp[:-1]:
__UpperCAmelCase =_tmp[_tmp.index(A_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__UpperCAmelCase =distance + int(i[1] )
_tmp.append(A_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__UpperCAmelCase =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda A_ : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowercase__ ( A_: List[Any] , A_: List[str] , A_: Union[str, Any] , A_: Optional[Any] , A_: Optional[Any] ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase =1
__UpperCAmelCase =first_solution
__UpperCAmelCase =[]
__UpperCAmelCase =distance_of_first_solution
__UpperCAmelCase =solution
while count <= iters:
__UpperCAmelCase =find_neighborhood(A_ , A_ )
__UpperCAmelCase =0
__UpperCAmelCase =neighborhood[index_of_best_solution]
__UpperCAmelCase =len(A_ ) - 1
__UpperCAmelCase =False
while not found:
__UpperCAmelCase =0
while i < len(A_ ):
if best_solution[i] != solution[i]:
__UpperCAmelCase =best_solution[i]
__UpperCAmelCase =solution[i]
break
__UpperCAmelCase =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__UpperCAmelCase =True
__UpperCAmelCase =best_solution[:-1]
__UpperCAmelCase =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__UpperCAmelCase =cost
__UpperCAmelCase =solution
else:
__UpperCAmelCase =index_of_best_solution + 1
__UpperCAmelCase =neighborhood[index_of_best_solution]
if len(A_ ) >= size:
tabu_list.pop(0 )
__UpperCAmelCase =count + 1
return best_solution_ever, best_cost
def lowercase__ ( A_: Union[str, Any]=None ) -> Tuple:
"""simple docstring"""
__UpperCAmelCase =generate_neighbours(args.File )
__UpperCAmelCase , __UpperCAmelCase =generate_first_solution(
args.File , A_ )
__UpperCAmelCase , __UpperCAmelCase =tabu_search(
A_ , A_ , A_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
__A = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
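# Illustrative sketch (not part of the original script) of the two-node swap that
# find_neighborhood performs on a tour; the tour below is a toy example with fixed
# endpoints, matching the solution format used above.
def _demo_swap_neighbors(tour=None):
    import copy

    tour = tour or ["a", "b", "c", "d", "a"]
    neighbors = []
    for i in range(1, len(tour) - 1):  # endpoints stay fixed
        for j in range(i + 1, len(tour) - 1):
            candidate = copy.deepcopy(tour)
            candidate[i], candidate[j] = candidate[j], candidate[i]
            neighbors.append(candidate)
    return neighbors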
| 68 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNormad(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
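The test class above exercises accelerate's hook machinery at length; a minimal self-contained sketch of the same post-forward mechanism, using only public accelerate APIs, is:
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module
class AddOnePostHook(ModelHook):
    # post_forward receives the module and its output and may rewrite the output
    def post_forward(self, module, output):
        return output + 1
layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
base = layer(x)
add_hook_to_module(layer, AddOnePostHook())
assert torch.allclose(layer(x), base + 1, atol=1e-5)
remove_hook_from_module(layer)  # restores the original forward
assert torch.allclose(layer(x), base, atol=1e-5)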
| 26 | 0 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        # 'A' is ord 65, so subtracting 64 maps A..Z to 1..26
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
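An equivalent left-to-right formulation of the same base-26 conversion, handy as a cross-check:
def excel_title_to_column_lr(column_title: str) -> int:
    number = 0
    for ch in column_title:
        number = number * 26 + (ord(ch) - 64)
    return number
assert excel_title_to_column_lr("A") == 1
assert excel_title_to_column_lr("AB") == 28
assert excel_title_to_column_lr("ZY") == 701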
| 69 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("""Algorithm is unable to find solution""")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
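The Manhattan-distance heuristic built in the __main__ block can also be written as a small helper, which makes the obstacle penalty explicit:
def make_heuristic(grid, goal, obstacle_penalty=99):
    # Manhattan distance to the goal, with blocked cells made expensive
    return [
        [obstacle_penalty if grid[i][j] == 1 else abs(i - goal[0]) + abs(j - goal[1])
         for j in range(len(grid[0]))]
        for i in range(len(grid))
    ]
assert make_heuristic([[0, 1], [0, 0]], [1, 1]) == [[2, 99], [1, 0]]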
| 26 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = IFInpaintingSuperResolutionPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def a__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def a__ ( self : str , A_ : List[str] , A_ : Tuple=0 ) -> Tuple:
"""simple docstring"""
if str(A_ ).startswith('mps' ):
lowerCamelCase_ = torch.manual_seed(A_ )
else:
lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(A_ )
lowerCamelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(A_ ) ).to(A_ )
lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
lowerCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def a__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
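The device-aware generator seeding used in get_dummy_inputs above is worth isolating: mps has no per-device Generator, so the test falls back to the global CPU seed. A standalone sketch:
import torch
def make_generator(device: str, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)
g = make_generator("cpu", 0)
a = torch.randn(2, generator=g)
g = make_generator("cpu", 0)
b = torch.randn(2, generator=g)
assert torch.equal(a, b)  # identical seeds give identical draws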
| 70 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
    if not isinstance(_lowerCamelCase, int):
        raise TypeError("""only integers accepted as input""")
    num_string = str(abs(_lowerCamelCase))
    # one copy of the digit list per position; copy ``index`` drops digit ``index``
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("""""".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
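The same result, the largest value obtainable by deleting exactly one digit, falls out of a one-liner with string slicing, which doubles as a test oracle:
def max_after_deleting_one_digit(number: int) -> int:
    digits = str(abs(number))
    return max(int(digits[:i] + digits[i + 1:]) for i in range(len(digits)))
assert max_after_deleting_one_digit(1234) == 234
assert max_after_deleting_one_digit(120) == 20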
| 26 | 0 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = path_or_paths
UpperCAmelCase_ : Optional[int] = split if split or isinstance(_snake_case ,_snake_case ) else "train"
UpperCAmelCase_ : Tuple = features
UpperCAmelCase_ : int = cache_dir
UpperCAmelCase_ : Optional[Any] = keep_in_memory
UpperCAmelCase_ : Any = streaming
UpperCAmelCase_ : List[str] = num_proc
UpperCAmelCase_ : int = kwargs
@abstractmethod
def UpperCamelCase__ ( self ):
pass
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Union[str, Any] = features
UpperCAmelCase_ : Dict = cache_dir
UpperCAmelCase_ : int = keep_in_memory
UpperCAmelCase_ : int = streaming
UpperCAmelCase_ : Tuple = num_proc
UpperCAmelCase_ : Optional[Any] = kwargs
@abstractmethod
def UpperCamelCase__ ( self ):
pass
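A hypothetical concrete subclass shows the intended use of this abstract-reader interface: the constructor stores the options and read() materializes a Dataset. The class name and the from_list helper below are illustrative, not taken from the snippet above:
from datasets import Dataset
class InMemoryListReader:
    """Toy reader mirroring the abstract interface above."""
    def __init__(self, rows, features=None):
        self.rows = rows
        self.features = features
    def read(self):
        return Dataset.from_list(self.rows)
ds = InMemoryListReader([{"text": "hello"}, {"text": "world"}]).read()
assert ds.num_rows == 2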
| 71 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth, node_index, is_max, scores, height) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""")
    if not scores:
        raise ValueError("""Scores cannot be empty""")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, not is_max, scores, height),
            minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
        )
    )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(F'''Optimal value : {minimax(0, 0, True, scores, height)}''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
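Hand-evaluating the example tree confirms the printed value: the depth-2 maxima are 90, 33, 65 and 34423, the depth-1 minima are 33 and 65, so the maximizing root picks 65. Using the minimax defined above:
import math
assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], math.log(8, 2)) == 65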
| 26 | 0 |
'''simple docstring'''
def UpperCamelCase ( lowercase_ : str ) -> bool:
'''simple docstring'''
    bitmap = 0
    for ch in lowercase_:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
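A set gives the same all-characters-unique answer in one line and is a convenient cross-check for the bitmask version:
def all_chars_unique(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)
assert all_chars_unique("abc") is True
assert all_chars_unique("aba") is False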
| 72 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[mid], sequence[end] = sequence[end], sequence[mid]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
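A quick in-place usage check for the slowsort defined above (it is a deliberately pessimal multiply-and-surrender sort, so keep inputs tiny):
data = [5, 2, 9, 1]
slowsort(data)
assert data == [1, 2, 5, 9]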
| 26 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : List[str] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _snake_case ( A__ ):
_lowercase : int = '''yolos'''
def __init__( self , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.0 , a=0.0 , a=0.02 , a=1E-12 , a=[512, 864] , a=16 , a=3 , a=True , a=100 , a=True , a=False , a=1 , a=5 , a=2 , a=5 , a=2 , a=0.1 , **a , ) -> Dict:
super().__init__(**a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = num_detection_tokens
SCREAMING_SNAKE_CASE = use_mid_position_embeddings
SCREAMING_SNAKE_CASE = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE = class_cost
SCREAMING_SNAKE_CASE = bbox_cost
SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE = bbox_loss_coefficient
SCREAMING_SNAKE_CASE = giou_loss_coefficient
SCREAMING_SNAKE_CASE = eos_coefficient
class _snake_case ( A__ ):
_lowercase : int = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return 12
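Assuming this obfuscated excerpt corresponds to the upstream transformers classes YolosConfig and YolosOnnxConfig, a minimal usage sketch is:
from transformers import YolosConfig
cfg = YolosConfig(num_detection_tokens=100)
print(cfg.hidden_size, cfg.image_size, cfg.num_detection_tokens)  # 768 [512, 864] 100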
| 73 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
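The core pattern the analyzer relies on, running a module's doctests through unittest via doctest.DocTestSuite, works on any module object, including one built on the fly:
import doctest
import types
import unittest
mod = types.ModuleType("demo")
mod.__doc__ = """
>>> 1 + 1
2
"""
result = unittest.TextTestRunner().run(doctest.DocTestSuite(mod))
assert len(result.failures) == 0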
| 26 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , **_A : Dict ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**_A )
def UpperCAmelCase__ ( self : Optional[int] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) )
__SCREAMING_SNAKE_CASE : Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' )
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = []
for element in html_code.descendants:
if type(_A ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A )
stringaxtag_seq.append(_A )
stringaxsubs_seq.append(_A )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
for tagname, subs in zip(_A , _A ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self : Optional[int] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = False
# Check that strings has a valid type
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = True
elif isinstance(_A , (list, tuple) ):
if len(_A ) == 0 or isinstance(html_strings[0] , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
F'''but is of type {type(_A )}.''' )
__SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) )
if not is_batched:
__SCREAMING_SNAKE_CASE : Dict = [html_strings]
# Get nodes + xpaths
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Tuple = []
for html_string in html_strings:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A )
nodes.append(_A )
__SCREAMING_SNAKE_CASE : Dict = []
for node, tag_list, sub_list in zip(_A , _A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A )
xpath_strings.append(_A )
xpaths.append(_A )
# return as Dict
__SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths}
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A )
return encoded_inputs
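The xpath string construction at the heart of the extractor above is easy to sanity-check standalone: each tag contributes a /tag step, and a nonzero subscript appends [n].
def construct_xpath(tags, subs):
    xpath = ""
    for tag, sub in zip(tags, subs):
        xpath += f"/{tag}"
        if sub != 0:
            xpath += f"[{sub}]"
    return xpath
assert construct_xpath(["html", "body", "div", "p"], [0, 0, 2, 1]) == "/html/body/div[2]/p[1]"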
| 74 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
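The classifier-free guidance update inside the denoising loop is just a linear extrapolation between the unconditional and text-conditioned noise predictions; isolated with toy tensors:
import torch
guidance_scale = 7.5
noise_pred_uncond = torch.zeros(1, 4, 8, 8)
noise_pred_text = torch.ones(1, 4, 8, 8)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(noise_pred, torch.full((1, 4, 8, 8), 7.5))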
| 26 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
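The KPI gate at the end of the test reduces to three comparisons against per-configuration thresholds; stripped of the SageMaker plumbing (the sample values below are illustrative):
results = {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}
train_runtime = 600            # seconds reported by the training job
eval_accuracy = [0.71, 0.73]   # one value per logged evaluation
eval_loss = [0.55, 0.52]
assert train_runtime <= results["train_runtime"]
assert all(acc >= results["eval_accuracy"] for acc in eval_accuracy)
assert all(loss <= results["eval_loss"] for loss in eval_loss)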
| 75 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 0 |
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # shift larger elements right until the insertion point is found
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array
def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
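A self-check of the hybrid sort defined above (16 elements sit exactly at the insertion-sort threshold, so this exercises the insertion-sort branch):
data = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
assert sort(data) == sorted([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])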
| 76 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
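The label remapping done in the config builder, shifting every ImageNet-1k id up by one and reserving index 0 for "background", is worth seeing in isolation:
idalabel = {0: "tench", 1: "goldfish"}           # toy stand-in for the 1000-class map
idalabel = {k + 1: v for k, v in idalabel.items()}
idalabel[0] = "background"                       # TensorFlow MobileNetV1 predicts 1001 classes
assert idalabel == {0: "background", 1: "tench", 2: "goldfish"}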
| 26 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : str = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase : List[Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_))))
__UpperCAmelCase : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
__UpperCAmelCase : str = {"unk_token": "<unk>"}
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
__UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCamelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCamelCase_))
__UpperCAmelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
__UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , UpperCamelCase_)
with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Any , **UpperCamelCase_ : Dict):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_)
def a_ ( self : Union[str, Any] , **UpperCamelCase_ : List[Any]):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_)
def a_ ( self : Tuple , **UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__UpperCAmelCase : List[str] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1)) for x in image_inputs]
return image_inputs
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : int = self.get_rust_tokenizer()
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
processor_slow.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_)
__UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
processor_fast.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_)
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_)
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_)
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__UpperCAmelCase : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
__UpperCAmelCase : str = self.get_image_processor(do_normalize=UpperCamelCase_)
__UpperCAmelCase : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase_)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , UpperCamelCase_)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : str = self.prepare_image_inputs()
__UpperCAmelCase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : List[str] = processor(images=UpperCamelCase_ , return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : str = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Tuple = "lower newer"
__UpperCAmelCase : List[Any] = processor(text=UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : Any = tokenizer(UpperCamelCase_ , return_tensors="np")
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist())
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : int = self.get_tokenizer()
__UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = "lower newer"
__UpperCAmelCase : str = self.prepare_image_inputs()
__UpperCAmelCase : Any = processor(text=UpperCamelCase_ , images=UpperCamelCase_)
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase : Dict = "google/owlvit-base-patch32"
__UpperCAmelCase : str = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : int = ["cat", "nasa badge"]
__UpperCAmelCase : Dict = processor(text=UpperCamelCase_)
__UpperCAmelCase : List[Any] = 16
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (2, seq_length))
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = "google/owlvit-base-patch32"
__UpperCAmelCase : Tuple = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = [["cat", "nasa badge"], ["person"]]
__UpperCAmelCase : List[str] = processor(text=UpperCamelCase_)
__UpperCAmelCase : List[str] = 16
__UpperCAmelCase : str = len(UpperCamelCase_)
__UpperCAmelCase : List[str] = max([len(UpperCamelCase_) for texts in input_texts])
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length))
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : List[str]):
"""simple docstring"""
__UpperCAmelCase : Tuple = "google/owlvit-base-patch32"
__UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained(UpperCamelCase_)
__UpperCAmelCase : Any = ["cat", "nasa badge"]
__UpperCAmelCase : Optional[int] = processor(text=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = 16
__UpperCAmelCase : Optional[Any] = inputs["input_ids"]
__UpperCAmelCase : Any = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys()) , ["input_ids", "attention_mask"])
self.assertEqual(inputs["input_ids"].shape , (2, seq_length))
self.assertListEqual(list(input_ids[0]) , predicted_ids[0])
self.assertListEqual(list(input_ids[1]) , predicted_ids[1])
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Any = self.prepare_image_inputs()
__UpperCAmelCase : List[Any] = self.prepare_image_inputs()
__UpperCAmelCase : Optional[Any] = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_)
self.assertListEqual(list(inputs.keys()) , ["query_pixel_values", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_):
processor()
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : str = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_)
__UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : List[str] = processor.batch_decode(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_)
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_)
| 77 |
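# A minimal usage sketch for the processor exercised by the tests above. It
# assumes network access to fetch "google/owlvit-base-patch32" (the checkpoint
# the tests reference); the all-zero image is a stand-in for real input.
import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

owlvit_processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
dummy_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
owlvit_inputs = owlvit_processor(text=["cat", "nasa badge"], images=dummy_image, return_tensors="np")
print(sorted(owlvit_inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']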
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 0 |
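# A hand-computed check of the recall definition documented above
# (Recall = TP / (TP + FN)), compared against sklearn's recall_score on the
# same toy labels used in Example 1 of the docstring.
from math import isclose
from sklearn.metrics import recall_score

refs, preds = [0, 0, 1, 1, 1], [0, 1, 0, 1, 1]
tp = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 1)  # 2
fn = sum(1 for r, p in zip(refs, preds) if r == 1 and p == 0)  # 1
assert isclose(recall_score(refs, preds), tp / (tp + fn))  # both 2/3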
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class __A :
a__ : int
a__ : TreeNode | None = None
a__ : TreeNode | None = None
SCREAMING_SNAKE_CASE_: Union[str, Any] =namedtuple('CoinsDistribResult', 'moves excess')
def lowerCAmelCase_ ( snake_case_ : TreeNode | None ) -> int:
'''simple docstring'''
if root is None:
return 0
# Validation
def count_nodes(snake_case_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(snake_case_ : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(snake_case_ ) != count_coins(snake_case_ ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
def get_distrib(snake_case_ : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.left )
UpperCAmelCase_ , UpperCAmelCase_ = get_distrib(node.right )
UpperCAmelCase_ = 1 - left_distrib_excess
UpperCAmelCase_ = 1 - right_distrib_excess
UpperCAmelCase_ = (
left_distrib_moves
+ right_distrib_moves
+ abs(snake_case_ )
+ abs(snake_case_ )
)
UpperCAmelCase_ = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(snake_case_ , snake_case_ )
return get_distrib(snake_case_ )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 |
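# A self-contained sketch of the same post-order recursion in an equivalent
# formulation: each subtree reports its coin excess (coins minus nodes), and
# every unit of excess crossing an edge costs one move. The Node class here is
# a stand-in for the TreeNode above.
from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: "Node | None" = None
    right: "Node | None" = None

def min_coin_moves(node: "Node | None") -> tuple[int, int]:
    """Return (moves, excess) for the subtree rooted at `node`."""
    if node is None:
        return 0, 0
    left_moves, left_excess = min_coin_moves(node.left)
    right_moves, right_excess = min_coin_moves(node.right)
    moves = left_moves + right_moves + abs(left_excess) + abs(right_excess)
    return moves, node.data - 1 + left_excess + right_excess

# Root holds 3 coins, both leaves hold 0: one coin moves to each leaf.
print(min_coin_moves(Node(3, Node(0), Node(0)))[0])  # 2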
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
| 26 | 0 |
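# A sketch of the MCC formula behind the metric above, computed from the 2x2
# confusion matrix and cross-checked against sklearn on a binary toy case.
from math import isclose, sqrt
from sklearn.metrics import matthews_corrcoef

y_true = [1, 1, 1, 0, 0, 0]
y_pred = [1, 1, 0, 0, 0, 1]
tp = sum(t == p == 1 for t, p in zip(y_true, y_pred))
tn = sum(t == p == 0 for t, p in zip(y_true, y_pred))
fp = sum(t == 0 and p == 1 for t, p in zip(y_true, y_pred))
fn = sum(t == 1 and p == 0 for t, p in zip(y_true, y_pred))
mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
assert isclose(mcc, matthews_corrcoef(y_true, y_pred))  # both 1/3 here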
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE__ : str = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 'facebook/nllb-200-distilled-600M'
__lowerCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__lowerCamelCase = 'translator'
__lowerCamelCase = AutoTokenizer
__lowerCamelCase = AutoModelForSeqaSeqLM
__lowerCamelCase = LANGUAGE_CODES
__lowerCamelCase = ['text', 'text', 'text']
__lowerCamelCase = ['text']
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if src_lang not in self.lang_to_code:
raise ValueError(f"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"{tgt_lang} is not a supported language." )
UpperCAmelCase__ : Union[str, Any] = self.lang_to_code[src_lang]
UpperCAmelCase__ : Dict = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_lowerCAmelCase , return_tensors="""pt""" , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
return self.model.generate(**_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_lowerCAmelCase )
| 79 |
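# A minimal sketch of the name-to-code lookup the tool above performs before
# tokenization, using a tiny subset of LANGUAGE_CODES; loading the actual
# facebook/nllb-200-distilled-600M checkpoint is omitted here.
LANG_TO_CODE = {"English": "eng_Latn", "Romanian": "ron_Latn", "French": "fra_Latn"}

def resolve_langs(src_lang: str, tgt_lang: str) -> tuple[str, str]:
    for lang in (src_lang, tgt_lang):
        if lang not in LANG_TO_CODE:
            raise ValueError(f"{lang} is not a supported language.")
    return LANG_TO_CODE[src_lang], LANG_TO_CODE[tgt_lang]

print(resolve_langs("English", "Romanian"))  # ('eng_Latn', 'ron_Latn')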
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 26 | 0 |
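# A quick check of the cache-naming rule the tests above assert: with
# use_etag=False, the cached file name is the hash of the bare URL, so
# hash_url_to_filename(url, etag=None) should reproduce the HASH constant.
from datasets.utils.file_utils import hash_url_to_filename

mock_url = "http://www.mocksite.com/file1.txt"
print(hash_url_to_filename(mock_url, etag=None))
# expected: 6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8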
from math import isqrt, loga
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCamelCase , lowerCamelCase ):
__lowercase = False
return [i for i in range(2 , lowerCamelCase ) if is_prime[i]]
def snake_case ( lowerCamelCase = 800_800 , lowerCamelCase = 800_800 ):
'''simple docstring'''
__lowercase = degree * loga(lowerCamelCase )
__lowercase = int(lowerCamelCase )
__lowercase = calculate_prime_numbers(lowerCamelCase )
__lowercase = 0
__lowercase = 0
__lowercase = len(lowerCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 80 |
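# A brute-force cross-check of the logarithm bound used above: for primes
# p < q, p**q * q**p <= b**b exactly when q*log2(p) + p*log2(q) <= b*log2(b),
# so both ways of counting pairs agree on a small limit.
from math import log2

def small_primes(limit: int) -> list[int]:
    sieve = [True] * limit
    primes = []
    for i in range(2, limit):
        if sieve[i]:
            primes.append(i)
            for j in range(i * i, limit, i):
                sieve[j] = False
    return primes

b = 30
primes = small_primes(200)
pairs = [(p, q) for i, p in enumerate(primes) for q in primes[i + 1 :]]
direct = sum(1 for p, q in pairs if p**q * q**p <= b**b)
via_logs = sum(1 for p, q in pairs if q * log2(p) + p * log2(q) <= b * log2(b))
assert direct == via_logs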
'''simple docstring'''
def _a ( _lowerCamelCase = 100 ) -> int:
"""simple docstring"""
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 0 |
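# A brute-force check of the closed forms used above: the sum of the first n
# squares is n(n+1)(2n+1)/6 and the square of their sum is (n(n+1)/2)**2.
n = 100
sum_of_squares = sum(i * i for i in range(1, n + 1))
square_of_sum = sum(range(1, n + 1)) ** 2
assert sum_of_squares == n * (n + 1) * (2 * n + 1) // 6
assert square_of_sum == (n * (n + 1) // 2) ** 2
print(square_of_sum - sum_of_squares)  # 25164150, the Project Euler 6 answer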
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class a (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : int=7 , lowerCamelCase : Dict=3 , lowerCamelCase : Optional[Any]=18 , lowerCamelCase : Optional[Any]=30 , lowerCamelCase : Any=400 , lowerCamelCase : int=True , lowerCamelCase : Tuple=None , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[int]=None , lowerCamelCase : Union[str, Any]=True , lowerCamelCase : str=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , lowerCamelCase : Optional[int]=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , lowerCamelCase : Optional[int]=True , ) -> List[Any]:
__snake_case : List[str] = size if size is not None else {"height": 224, "width": 224}
__snake_case : List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18}
__snake_case : Tuple = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = num_channels
__snake_case : Union[str, Any] = image_size
__snake_case : Union[str, Any] = min_resolution
__snake_case : str = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Optional[int] = size
__snake_case : int = do_center_crop
__snake_case : Dict = crop_size
__snake_case : List[Any] = do_normalize
__snake_case : str = image_mean
__snake_case : Optional[int] = image_std
__snake_case : Union[str, Any] = do_convert_rgb
def __snake_case ( self : str ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any]=False , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str]=False ) -> Optional[int]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__snake_case : List[str] = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
__snake_case : List[str] = []
for i in range(self.batch_size ):
__snake_case , __snake_case : Tuple = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__snake_case : str = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
if torchify:
__snake_case : Dict = [torch.from_numpy(lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def __snake_case ( self : int ) -> str:
__snake_case : Optional[Any] = ChineseCLIPImageProcessingTester(self , do_center_crop=lowerCamelCase )
@property
def __snake_case ( self : List[str] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) )
def __snake_case ( self : List[Any] ) -> Dict:
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __snake_case ( self : str ) -> int:
pass
def __snake_case ( self : Optional[int] ) -> Any:
# Initialize image_processing
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : Optional[Any] ) -> Optional[int]:
# Initialize image_processing
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Any = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : List[str] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __snake_case ( self : Any ) -> Dict:
# Initialize image_processing
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : str = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : Dict = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def __snake_case ( self : Tuple ) -> Optional[int]:
__snake_case : Optional[int] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=lowerCamelCase )
__snake_case : Any = 3
@property
def __snake_case ( self : List[Any] ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : List[str] ) -> Union[str, Any]:
__snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "center_crop" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_convert_rgb" ) )
def __snake_case ( self : int ) -> Tuple:
pass
def __snake_case ( self : List[Any] ) -> Any:
# Initialize image_processing
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case : List[Any] = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 81 |
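# A minimal usage sketch for the image processor tested above, assuming the
# class defaults (resize the shortest edge to 224, center-crop to 224x224,
# then normalize): any input image comes out as a fixed-size tensor.
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

clip_processor = ChineseCLIPImageProcessor()
sample = Image.fromarray(np.random.randint(0, 255, (320, 480, 3), dtype=np.uint8))
pixel_values = clip_processor(images=sample, return_tensors="np").pixel_values
print(pixel_values.shape)  # (1, 3, 224, 224)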
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = row, column
__snake_case : Dict = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
__snake_case : Dict = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__snake_case : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__snake_case : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
__snake_case : str = f'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ : list[float] ) -> str:
nonlocal string_format_identifier
__snake_case : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self )
def lowercase__ ( self : Dict , __magic_name__ : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , __magic_name__ : tuple[int, int] ) -> Any:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
__snake_case : Optional[int] = value
def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
__snake_case : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) -> Matrix:
"""simple docstring"""
__snake_case : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = -self[r, c]
return result
def __sub__( self : Optional[int] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : List[Any] , __magic_name__ : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
__snake_case : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : Tuple = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
__snake_case : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__snake_case : Optional[int] = f'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
__snake_case : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : str = self[r, c]
return result
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__snake_case : List[str] = v.transpose()
__snake_case : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
__snake_case : Any = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__snake_case : Dict = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Union[str, Any] = 1, 2, -3
__snake_case : str = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}''' )
def _a ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 26 | 0 |
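# A numpy cross-check of the Sherman-Morrison update implemented above: given
# Ainv = A^-1, (A + u v^T)^-1 = Ainv - (Ainv u v^T Ainv) / (1 + v^T Ainv u),
# provided the denominator is nonzero (the code above returns None otherwise).
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(3, 3)) + 3 * np.eye(3)  # comfortably invertible
u = rng.normal(size=(3, 1))
v = rng.normal(size=(3, 1))
ainv = np.linalg.inv(a)
updated = ainv - (ainv @ u @ v.T @ ainv) / (1.0 + (v.T @ ainv @ u).item())
print(np.allclose(updated, np.linalg.inv(a + u @ v.T)))  # True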
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
UpperCAmelCase_ = DetaConfig(
backbone_config=lowerCAmelCase__ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=lowerCAmelCase__ , with_box_refine=lowerCAmelCase__ , two_stage=lowerCAmelCase__ , )
# set labels
UpperCAmelCase_ = "huggingface/label-files"
if "o365" in model_name:
UpperCAmelCase_ = 366
UpperCAmelCase_ = "object365-id2label.json"
else:
UpperCAmelCase_ = 91
UpperCAmelCase_ = "coco-detection-id2label.json"
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = json.load(open(cached_download(hf_hub_url(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) ) , "r" ) )
UpperCAmelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
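# helper: remove a key from the state dict and re-insert its tensor under the renamed key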
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dct.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
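            # the fused qkv weight has shape (3*dim, dim): rows [0:dim] hold the query,
            # rows [dim:2*dim] the key and rows [2*dim:3*dim] the value projection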
UpperCAmelCase_ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
            UpperCAmelCase_ = in_proj_weight[:dim, :]
            UpperCAmelCase_ = in_proj_bias[:dim]
            UpperCAmelCase_ = in_proj_weight[dim : dim * 2, :]
            UpperCAmelCase_ = in_proj_bias[dim : dim * 2]
            UpperCAmelCase_ = in_proj_weight[-dim:, :]
            UpperCAmelCase_ = in_proj_bias[-dim:]
# fmt: on
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
# transformer decoder self-attention layers
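    # only the decoder self-attention uses a fused torch in_proj here; the cross-attention
    # is deformable attention whose projections were already renamed in create_rename_keys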
UpperCAmelCase_ = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase_ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCAmelCase_ = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:hidden_size, :]
UpperCAmelCase_ = in_proj_bias[:hidden_size]
        UpperCAmelCase_ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCAmelCase_ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase_ = in_proj_weight[-hidden_size:, :]
UpperCAmelCase_ = in_proj_bias[-hidden_size:]
def a__ ( ):
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
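# conversion entry point: download the original DETA checkpoint, rename its keys to the
# HuggingFace layout, split the fused qkv projections, load everything into
# DetaForObjectDetection and verify the outputs on a COCO sample image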
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = get_deta_config(lowerCAmelCase__ )
# load original state dict
if model_name == "deta-swin-large":
UpperCAmelCase_ = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
UpperCAmelCase_ = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
UpperCAmelCase_ = torch.load(lowerCAmelCase__ , map_location="cpu" )["model"]
    # print the original state dict keys and shapes for inspection
for name, param in state_dict.items():
print(lowerCAmelCase__ , param.shape )
# rename keys
UpperCAmelCase_ = create_rename_keys(lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_swin_q_k_v(lowerCAmelCase__ , config.backbone_config )
read_in_decoder_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
if "input_proj" in key:
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
UpperCAmelCase_ = state_dict.pop(lowerCAmelCase__ )
UpperCAmelCase_ = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase_ = DetaForObjectDetection(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
model.to(lowerCAmelCase__ )
# load image processor
UpperCAmelCase_ = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = processor(images=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase_ = encoding["pixel_values"]
UpperCAmelCase_ = model(pixel_values.to(lowerCAmelCase__ ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
UpperCAmelCase_ = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
UpperCAmelCase_ = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
UpperCAmelCase_ = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
UpperCAmelCase_ = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowerCAmelCase__ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowerCAmelCase__ ) , atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 82 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
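# build a bias-free output projection whose weight reuses the token embedding matrix
# (the usual weight-tying trick for seq2seq decoders)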
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
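    # e.g. "...moe_layer.experts.0.fc1.weight" -> "...ffn.experts.expert_3.fc1.weight" for expert_idx=3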
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
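# shard the MoE checkpoint on the fly: each expert rank file becomes its own shard,
# the shared (non-expert) weights go into a final shard, and a weight_map index ties
# every parameter name to the shard file that contains it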
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
    # Parameters (all optional, with defaults pointing at the original checkpoint layout)
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( _lowercase):
snake_case__ : Tuple = ["input_features", "is_longer"]
def __init__( self : Optional[int] , __lowerCAmelCase : Tuple=6_4 , __lowerCAmelCase : Union[str, Any]=4_8_0_0_0 , __lowerCAmelCase : int=4_8_0 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : int=1_0_2_4 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : int=False , __lowerCAmelCase : float = 0 , __lowerCAmelCase : float = 1_4_0_0_0 , __lowerCAmelCase : int = None , __lowerCAmelCase : str = "fusion" , __lowerCAmelCase : str = "repeatpad" , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : str = top_db
_lowerCamelCase : Optional[int] = truncation
_lowerCamelCase : Tuple = padding
_lowerCamelCase : Any = fft_window_size
_lowerCamelCase : Tuple = (fft_window_size >> 1) + 1
_lowerCamelCase : int = hop_length
_lowerCamelCase : List[Any] = max_length_s
_lowerCamelCase : Optional[Any] = max_length_s * sampling_rate
_lowerCamelCase : List[str] = sampling_rate
_lowerCamelCase : Tuple = frequency_min
_lowerCamelCase : List[Any] = frequency_max
_lowerCamelCase : str = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCAmelCase , min_frequency=__lowerCAmelCase , max_frequency=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , norm=__lowerCAmelCase , mel_scale='''htk''' , )
_lowerCamelCase : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__lowerCAmelCase , min_frequency=__lowerCAmelCase , max_frequency=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowerCamelCase : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : np.array , __lowerCAmelCase : Optional[np.array] = None ):
"""simple docstring"""
_lowerCamelCase : Tuple = spectrogram(
__lowerCAmelCase , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__lowerCAmelCase , log_mel='''dB''' , )
return log_mel_spectrogram.T
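    # "fusion" mode for long audio: a bilinearly shrunk copy of the full mel spectrogram
    # plus three chunks sampled from the front/middle/back thirds, stacked on a new axis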
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_lowerCamelCase : Dict = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_lowerCamelCase : Dict = [0]
# randomly choose index for each part
_lowerCamelCase : Dict = np.random.choice(ranges[0] )
_lowerCamelCase : List[str] = np.random.choice(ranges[1] )
_lowerCamelCase : int = np.random.choice(ranges[2] )
_lowerCamelCase : Tuple = mel[idx_front : idx_front + chunk_frames, :]
_lowerCamelCase : int = mel[idx_middle : idx_middle + chunk_frames, :]
_lowerCamelCase : Dict = mel[idx_back : idx_back + chunk_frames, :]
_lowerCamelCase : str = torch.tensor(mel[None, None, :] )
_lowerCamelCase : List[Any] = torch.nn.functional.interpolate(
__lowerCAmelCase , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=__lowerCAmelCase )
_lowerCamelCase : Dict = mel_shrink[0][0].numpy()
_lowerCamelCase : Optional[Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __lowerCAmelCase : np.array , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowerCamelCase : Any = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowerCamelCase : int = len(__lowerCAmelCase ) - max_length
_lowerCamelCase : str = np.random.randint(0 , overflow + 1 )
_lowerCamelCase : List[str] = waveform[idx : idx + max_length]
_lowerCamelCase : List[Any] = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_lowerCamelCase : Any = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters )
                _lowerCamelCase : Tuple = max_length // self.hop_length + 1  # the +1 relates to how the spectrogram is computed
_lowerCamelCase : int = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowerCamelCase : str = np.stack([mel, mel, mel, mel] , axis=0 )
_lowerCamelCase : Any = False
else:
_lowerCamelCase : Any = self._random_mel_fusion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[int] = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
_lowerCamelCase : Optional[Any] = False
        # "repeat" tiles the audio up to max_length; "repeatpad" tiles it and zero-pads the remainder
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowerCamelCase : Tuple = int(max_length / len(__lowerCAmelCase ) )
_lowerCamelCase : Dict = np.stack(np.tile(__lowerCAmelCase , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_lowerCamelCase : Optional[Any] = int(max_length / len(__lowerCAmelCase ) )
_lowerCamelCase : int = np.stack(np.tile(__lowerCAmelCase , __lowerCAmelCase ) )
_lowerCamelCase : Tuple = np.pad(__lowerCAmelCase , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
_lowerCamelCase : Tuple = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters )
_lowerCamelCase : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_lowerCamelCase : List[Any] = self._np_extract_fbank_features(__lowerCAmelCase , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Optional[Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : str = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[str] = truncation if truncation is not None else self.truncation
_lowerCamelCase : Optional[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_lowerCamelCase : List[str] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_lowerCamelCase : str = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : str = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
_lowerCamelCase : int = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Dict = [np.asarray(__lowerCAmelCase )]
# convert to mel spectrogram, truncate and pad if needed.
_lowerCamelCase : Dict = [
self._get_input_mel(__lowerCAmelCase , max_length if max_length else self.nb_max_samples , __lowerCAmelCase , __lowerCAmelCase )
for waveform in raw_speech
]
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = []
for mel, longer in padded_inputs:
input_mel.append(__lowerCAmelCase )
is_longer.append(__lowerCAmelCase )
if truncation == "fusion" and sum(__lowerCAmelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowerCamelCase : List[Any] = np.random.randint(0 , len(__lowerCAmelCase ) )
_lowerCamelCase : Union[str, Any] = True
if isinstance(input_mel[0] , __lowerCAmelCase ):
_lowerCamelCase : Optional[int] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_lowerCamelCase : Optional[Any] = [[longer] for longer in is_longer]
_lowerCamelCase : Tuple = {'''input_features''': input_mel, '''is_longer''': is_longer}
_lowerCamelCase : Union[str, Any] = BatchFeature(__lowerCAmelCase )
if return_tensors is not None:
_lowerCamelCase : List[str] = input_features.convert_to_tensors(__lowerCAmelCase )
return input_features
| 83 |
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
__snake_case : List[Any] = 0.04
__snake_case : Tuple = self.window_size // 2
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
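                # Harris corner response: r = det - k * trace**2 (k is the 0.04 constant
                # assigned above, not self.k); a large positive r indicates a corner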
                # corner response threshold; tune as needed
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
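# the heavy torch/sentencepiece/tokenizers imports below are deferred: `_import_structure`
# only lists names, and `_LazyModule` at the bottom resolves them on first attribute access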
UpperCAmelCase = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 84 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
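    # __call__ tokenizes `text`, preprocesses `images`, and, when both are given, attaches
    # the pixel values to the text encoding so a single BatchEncoding feeds the model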
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 26 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hang in `barrier` calls, you have some network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
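# printflock serializes prints across processes by taking an exclusive flock on this
# script file, so the output of concurrent ranks does not interleave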
def _a ( *lowercase__ : List[str] ):
'''simple docstring'''
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*lowercase__ )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
SCREAMING_SNAKE_CASE__ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.device("cuda", local_rank)
SCREAMING_SNAKE_CASE__ : Dict = socket.gethostname()
SCREAMING_SNAKE_CASE__ : str = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
SCREAMING_SNAKE_CASE__ : str = dist.get_rank()
SCREAMING_SNAKE_CASE__ : List[str] = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 85 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
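# streamlit cannot hash torch tensors or tokenizers; mapping their hash functions to
# `lambda _: None` makes st.cache ignore them when computing the cache key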
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__a :Any = logging.getLogger(__name__)
def __snake_case ( ):
"""simple docstring"""
A_ = argparse.ArgumentParser(
description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
parser.add_argument("--file_path" ,type=__UpperCamelCase ,default="data/dump.txt" ,help="The path to the data." )
parser.add_argument("--tokenizer_type" ,type=__UpperCamelCase ,default="bert" ,choices=["bert", "roberta", "gpt2"] )
parser.add_argument("--tokenizer_name" ,type=__UpperCamelCase ,default="bert-base-uncased" ,help="The tokenizer to use." )
parser.add_argument("--dump_file" ,type=__UpperCamelCase ,default="data/dump" ,help="The dump file prefix." )
A_ = parser.parse_args()
logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
A_ = BertTokenizer.from_pretrained(args.tokenizer_name )
A_ = tokenizer.special_tokens_map["cls_token"] # `[CLS]`
A_ = tokenizer.special_tokens_map["sep_token"] # `[SEP]`
elif args.tokenizer_type == "roberta":
A_ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
A_ = tokenizer.special_tokens_map["cls_token"] # `<s>`
A_ = tokenizer.special_tokens_map["sep_token"] # `</s>`
elif args.tokenizer_type == "gpt2":
A_ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
A_ = tokenizer.special_tokens_map["bos_token"] # `<|endoftext|>`
A_ = tokenizer.special_tokens_map["eos_token"] # `<|endoftext|>`
logger.info(f'''Loading text from {args.file_path}''' )
with open(args.file_path ,"r" ,encoding="utf8" ) as fp:
A_ = fp.readlines()
logger.info("Start encoding" )
logger.info(f'''{len(__UpperCamelCase )} examples to process.''' )
A_ = []
A_ = 0
A_ = 1_0000
A_ = time.time()
for text in data:
A_ = f'''{bos} {text.strip()} {sep}'''
A_ = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
rslt.append(__UpperCamelCase )
iter += 1
if iter % interval == 0:
A_ = time.time()
logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
A_ = time.time()
logger.info("Finished binarization" )
logger.info(f'''{len(__UpperCamelCase )} examples processed.''' )
A_ = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
A_ = tokenizer.vocab_size
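    # token ids fit in an unsigned 16-bit dtype whenever the vocab is smaller than 2**16,
    # halving the size of the pickled dump; otherwise fall back to a wider integer dtype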
if vocab_size < (1 << 16):
A_ = [np.uintaa(__UpperCamelCase ) for d in rslt]
else:
A_ = [np.intaa(__UpperCamelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'''Dump to {dp_file}''' )
with open(__UpperCamelCase ,"wb" ) as handle:
pickle.dump(rslt_ ,__UpperCamelCase ,protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
    main()
| 86 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 0 |
from scipy.stats import spearmanr
import datasets
_lowerCamelCase : List[Any] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_lowerCamelCase : Any = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_lowerCamelCase : List[Any] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self , predictions , references , return_pvalue=False ):
        '''simple docstring'''
        results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 87 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k ):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k
def rename_layernorm_keys(sd ):
    """simple docstring"""
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    """simple docstring"""
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 0 |
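The metric in the row above is a thin wrapper around scipy, so the docstring's first example can be reproduced directly; a minimal sketch using only scipy.stats:

from scipy.stats import spearmanr as scipy_spearmanr

# Same inputs as Example 1 in the docstring above.
rho, pvalue = scipy_spearmanr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(rho, 1))     # -0.7
print(round(pvalue, 2))  # 0.19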
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
    """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig(PretrainedConfig ):
    model_type = '''markuplm'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 88 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def update_version_in_file(fname , version , pattern ):
    """simple docstring"""
    with open(fname , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""" , version )
    code = re_pattern.sub(replace , code )
    with open(fname , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.write(code )
def update_version_in_examples(version ):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""" )
        if "legacy" in directories:
            directories.remove("""legacy""" )
        for fname in fnames:
            if fname.endswith(""".py""" ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern="""examples""" )
def global_version_update(version , patch=False ):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("""1.""" ):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
        index += 1
    with open(README_FILE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES["""init"""] , """r""" ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work(patch=False ):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
    else:
        default_version = F'''{default_version.major}.{default_version.minor + 1}.0'''
    # Now let's ask nicely if that's the right one.
    version = input(F'''Which version are you releasing? [{default_version}]''' )
    if len(version ) == 0:
        version = default_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version , patch=patch )
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'''Which version are we developing now? [{dev_version}]''' )
    if len(version ) == 0:
        version = dev_version
    print(F'''Updating version to {version}.''' )
    global_version_update(version )
    print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 0 |
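The release script above works by substituting a VERSION placeholder into pre-compiled patterns; a tiny standalone demo of that mechanism (the file contents below are made up):

import re

# Same pattern/template pair as the "init" entry of REPLACE_PATTERNS above.
pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
template = "__version__ = \"VERSION\"\n"

source = "# demo __init__.py contents\n__version__ = \"4.26.0.dev0\"\n"
print(pattern.sub(template.replace("VERSION", "4.26.0"), source))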
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase ):
    def _create_example_records(self ):
        """simple docstring"""
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict(self ):
        """simple docstring"""
        data = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
        return Dataset.from_dict(data )
    def test_create(self ):
        """simple docstring"""
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def test_list_dict_equivalent(self ):
        """simple docstring"""
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records(self ):  # checks what happens with missing columns
        """simple docstring"""
        uneven_records = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {"""col_1""": 1} )
        self.assertDictEqual(dset[1] , {"""col_1""": None} )  # NB: first record is used for columns
    def test_variable_list_records(self ):  # checks if the type can be inferred from the second record
        """simple docstring"""
        list_records = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        dset = Dataset.from_list(list_records )
        self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
    def test_create_empty(self ):
        """simple docstring"""
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 26 | 0 |
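The test case above exercises datasets.Dataset.from_list; its happy path can be reproduced in a few lines (a minimal sketch of the documented API):

from datasets import Dataset

records = [
    {"col_1": 3, "col_2": "a"},
    {"col_1": 2, "col_2": "b"},
]
dset = Dataset.from_list(records)
print(dset.column_names)  # ['col_1', 'col_2']
print(dset[0])            # {'col_1': 3, 'col_2': 'a'}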
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_text_keep_in_memory(keep_in_memory , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''text''': '''string'''},
        {'''text''': '''int32'''},
        {'''text''': '''float32'''},
    ] , )
def test_dataset_from_text_features(features , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''text''': '''string'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path , features=features , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_text_split(split , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(text_path , cache_dir=cache_dir , split=split ).read()
    _check_text_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_text_path_type(path_type , text_path , tmp_path ):
    if issubclass(path_type , str ):
        path = text_path
    elif issubclass(path_type , list ):
        path = [text_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
def _check_text_datasetdict(dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_textdatasetdict_reader_keep_in_memory(keep_in_memory , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'''train''': text_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''text''': '''string'''},
        {'''text''': '''int32'''},
        {'''text''': '''float32'''},
    ] , )
def test_textdatasetdict_reader_features(features , text_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {'''text''': '''string'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({'''train''': text_path} , features=features , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_textdatasetdict_reader_split(split , text_path , tmp_path ):
    if split:
        path = {split: text_path}
    else:
        split = '''train'''
        path = {'''train''': text_path, '''test''': text_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''text''': '''string'''}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
| 90 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
        __snake_case : str = nn.BatchNorm1d(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 | 0 |
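The hook tests in the row above revolve around accelerate's ModelHook/add_hook_to_module API, which wraps a module's forward pass. A minimal sketch of the pre-forward pattern being tested (the hook class name is made up):

import torch
from accelerate.hooks import ModelHook, add_hook_to_module


class AddOneToInput(ModelHook):
    # pre_forward receives the module plus its forward args/kwargs and
    # must return the (possibly modified) args and kwargs.
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


model = torch.nn.Linear(3, 3)
x = torch.randn(2, 3)
expected = model(x + 1)
add_hook_to_module(model, AddOneToInput())
assert torch.allclose(model(x), expected, atol=1e-5)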
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_electra_fast'''] = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_electra'''] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_electra'''] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_electra'''] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 91 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid: list[list[int]] , init: list[int] , goal: list[int] , cost: int , heuristic: list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("""Algorithm is unable to find solution""" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid ) - 1, len(grid[0] ) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0] ) )] for col in range(len(grid ) )]
    for i in range(len(grid ) ):
        for j in range(len(grid[0] ) ):
            heuristic[i][j] = abs(i - goal[0] ) + abs(j - goal[1] )
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path , action = search(grid , init , goal , cost , heuristic )
    print("ACTION MAP" )
    for i in range(len(action ) ):
        print(action[i] )
    for i in range(len(path ) ):
        print(path[i] )
| 26 | 0 |
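A tiny worked example against the search routine above (the 2x2 grid and its hand-built heuristic are illustrative):

mini_grid = [
    [0, 1],
    [0, 0],
]
# Manhattan distance to the goal, with the obstacle penalized.
mini_heuristic = [
    [2, 99],
    [1, 0],
]
mini_path, _ = search(mini_grid, [0, 0], [1, 1], 1, mini_heuristic)
print(mini_path)  # [[0, 0], [1, 0], [1, 1]]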
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
lowerCamelCase_ = ['pixel_values']
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : Tuple =size if size is not None else {'''shortest_edge''': 224}
lowercase : List[str] =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
lowercase : Dict =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Tuple =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ , param_name='''crop_size''' )
lowercase : Dict =do_resize
lowercase : Tuple =size
lowercase : List[Any] =resample
lowercase : Any =do_center_crop
lowercase : Optional[Any] =crop_size
lowercase : List[Any] =do_rescale
lowercase : List[str] =rescale_factor
lowercase : List[Any] =do_normalize
lowercase : Tuple =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase : Optional[int] =image_std if image_std is not None else OPENAI_CLIP_STD
lowercase : List[Any] =do_convert_rgb
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
lowercase : Any =get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase : List[str] =get_resize_output_image_size(UpperCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=UpperCAmelCase__ )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
lowercase : List[str] =get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(UpperCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Any , ):
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ):
'''simple docstring'''
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase__ : str , ):
'''simple docstring'''
lowercase : Union[str, Any] =do_resize if do_resize is not None else self.do_resize
lowercase : Optional[int] =size if size is not None else self.size
lowercase : List[str] =get_size_dict(UpperCAmelCase__ , param_name='''size''' , default_to_square=UpperCAmelCase__ )
lowercase : int =resample if resample is not None else self.resample
lowercase : Optional[int] =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : Optional[Any] =crop_size if crop_size is not None else self.crop_size
lowercase : int =get_size_dict(UpperCAmelCase__ , param_name='''crop_size''' , default_to_square=UpperCAmelCase__ )
lowercase : Dict =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Union[str, Any] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : Union[str, Any] =image_mean if image_mean is not None else self.image_mean
lowercase : List[str] =image_std if image_std is not None else self.image_std
lowercase : Dict =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase : Any =make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase : Tuple =[convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowercase : Any =[to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
lowercase : Dict =[self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_center_crop:
lowercase : Optional[int] =[self.center_crop(image=UpperCAmelCase__ , size=UpperCAmelCase__ ) for image in images]
if do_rescale:
lowercase : int =[self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
lowercase : Any =[to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowercase : Any ={'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__ )
| 92 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
    if not isinstance(_lowerCamelCase , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        # work on the digit string of the absolute value
        num_string = str(abs(_lowerCamelCase ) )
        # one copy of the digits per position, each with that position removed
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_transpositions ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 0 |
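A quick standalone check of the digit-removal idea above (the helper name is made up; multi-digit inputs assumed):

def max_after_dropping_one_digit(number: int) -> int:
    digits = str(abs(number))
    return max(int(digits[:i] + digits[i + 1:]) for i in range(len(digits)))


assert max_after_dropping_one_digit(12345) == 2345  # dropping the leading 1
assert max_after_dropping_one_digit(907) == 97      # dropping the 0 yields 97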
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :torch.FloatTensor
__magic_name__ :torch.FloatTensor
class _lowerCAmelCase ( a , a ):
"""simple docstring"""
__magic_name__ :str = 1
@register_to_config
def __init__( self , __UpperCAmelCase = 2_0_0_0 , __UpperCAmelCase = 0.15 , __UpperCAmelCase = 0.01 , __UpperCAmelCase = 13_48.0 , __UpperCAmelCase = 1E-5 , __UpperCAmelCase = 1 , ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = sigma_max
# setable values
lowerCAmelCase__ :str = None
self.set_sigmas(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
'''simple docstring'''
return sample
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCAmelCase__ :str = torch.linspace(1 , __UpperCAmelCase , __UpperCAmelCase , device=__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = sigma_min if sigma_min is not None else self.config.sigma_min
lowerCAmelCase__ :Any = sigma_max if sigma_max is not None else self.config.sigma_max
lowerCAmelCase__ :Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCAmelCase__ :str = torch.exp(torch.linspace(math.log(__UpperCAmelCase ) , math.log(__UpperCAmelCase ) , __UpperCAmelCase ) )
lowerCAmelCase__ :Optional[int] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True , ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
lowerCAmelCase__ :Dict = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCAmelCase__ :Tuple = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCAmelCase__ :List[str] = timesteps.to(self.discrete_sigmas.device )
lowerCAmelCase__ :Union[str, Any] = self.discrete_sigmas[timesteps].to(sample.device )
lowerCAmelCase__ :Optional[Any] = self.get_adjacent_sigma(__UpperCAmelCase , __UpperCAmelCase ).to(sample.device )
lowerCAmelCase__ :Optional[Any] = torch.zeros_like(__UpperCAmelCase )
lowerCAmelCase__ :str = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCAmelCase__ :int = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCAmelCase__ :List[Any] = diffusion.unsqueeze(-1 )
lowerCAmelCase__ :Any = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCAmelCase__ :Optional[int] = randn_tensor(
sample.shape , layout=sample.layout , generator=__UpperCAmelCase , device=sample.device , dtype=sample.dtype )
lowerCAmelCase__ :Union[str, Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCAmelCase__ :List[str] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__UpperCAmelCase , prev_sample_mean=__UpperCAmelCase )
    def step_correct( self , model_output , sample , generator = None , return_dict = True , ):
        '''simple docstring'''
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # For small batch sizes, the paper "suggests replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ):
        '''simple docstring'''
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
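# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# One predictor-corrector sampling loop in the style the methods above implement,
# run against the diffusers ScoreSdeVeScheduler API that they mirror. The toy
# `score` lambda stands in for a learned score network and is an assumption.
if __name__ == "__main__":
    import torch
    from diffusers import ScoreSdeVeScheduler

    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps=5)
    scheduler.set_sigmas(num_inference_steps=5)
    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    score = lambda x, t: -x  # toy stand-in for a score model
    for t in scheduler.timesteps:
        # correction steps (Langevin dynamics), then one prediction step (reverse SDE)
        for _ in range(scheduler.config.correct_steps):
            sample = scheduler.step_correct(score(sample, t), sample).prev_sample
        sample = scheduler.step_pred(score(sample, t), t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 32, 32])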
| 93 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if not scores:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26 | 0 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ):
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += [key]
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
def mark_multiple( *keys ):
    """simple docstring"""
    def decorator(func ):
        handle = getattr(func , '''handle_key''' , [] )
        handle += keys
        setattr(func , '''handle_key''' , handle )
        return func
    return decorator
class KeyHandler( type ):
    """simple docstring"""
    def __new__( cls , name , bases , attrs ):
        '''simple docstring'''
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , '''key_handler''' ):
            setattr(new_cls , '''key_handler''' , {} )
        setattr(new_cls , '''handle_input''' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , '''handle_key''' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        '''simple docstring'''
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register( cls ):
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
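# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# A tiny interactive class wired through the decorators and metaclass above. The
# KEYMAP entries used ("newline", "up", "down") are assumptions about the sibling
# keymap module; handle_input blocks on get_character(), so this only runs in a
# real terminal.
@register
class DemoMenu:
    @mark(KEYMAP["newline"])
    def select(cls):
        return "selected"
    @mark_multiple(KEYMAP["up"], KEYMAP["down"])
    def move(cls):
        return "moved"
# DemoMenu.handle_input(DemoMenu) dispatches to `select` or `move` by key pressed.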
| 94 |
'''simple docstring'''
from __future__ import annotations
def slowsort( sequence , start = None , end = None ) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
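# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# slowsort sorts its argument in place.
if __name__ == "__main__":
    data = [5, 2, 9, 1, 5, 6]
    slowsort(data)
    print(data)  # [1, 2, 5, 5, 6, 9]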
| 26 | 0 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowerCamelCase_ = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    def __post_init__( self ):
        self.task_name = self.task_name.lower()
class Split( Enum ):
    train = '''train'''
    dev = '''dev'''
    test = '''test'''
class GlueDataset( Dataset ):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , lowerCAmelCase_ , )
UpperCAmelCase_ : Dict = args
UpperCAmelCase_ : Any = glue_processors[args.task_name]()
UpperCAmelCase_ : Any = glue_output_modes[args.task_name]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
try:
UpperCAmelCase_ : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
UpperCAmelCase_ : Optional[int] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
UpperCAmelCase_ : Optional[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__( self ) -> int:
        return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels( self ):
        return self.label_list
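# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Builds the (deprecated) GlueDataset for SST-2; the data_dir is a placeholder
# and must contain the GLUE .tsv files for the task.
def _build_sst2_dataset():
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    args = GlueDataTrainingArguments(task_name="sst-2", data_dir="/path/to/SST-2", max_seq_length=128)
    return GlueDataset(args, tokenizer=tokenizer, mode=Split.dev)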
| 95 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
    def analyze_directory( self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ):
"""simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(Path("""..""" ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_files( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_files( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_files( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def test_remaining_files( self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def test_documentation( self ):
        """simple docstring"""
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 26 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput( ModelOutput ):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig( XLMRobertaConfig ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=5_1_2 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation( RobertaPreTrainedModel ):
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids: Optional[torch.Tensor] = None , attention_mask: Optional[torch.Tensor] = None , token_type_ids: Optional[torch.Tensor] = None , position_ids: Optional[torch.Tensor] = None , head_mask: Optional[torch.Tensor] = None , inputs_embeds: Optional[torch.Tensor] = None , encoder_hidden_states: Optional[torch.Tensor] = None , encoder_attention_mask: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output_pre = outputs["""hidden_states"""][-2]
            sequence_output_pre = self.pre_LN(sequence_output_pre )
            projection_state = self.transformation_pre(sequence_output_pre )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
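# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Instantiates a tiny randomly initialized RobertaSeries model and runs one
# forward pass; all sizes are arbitrary small values chosen for the demo.
def _demo_roberta_series():
    config = RobertaSeriesConfig(
        hidden_size=32, project_dim=32, intermediate_size=37,
        num_attention_heads=4, num_hidden_layers=2, vocab_size=100,
    )
    model = RobertaSeriesModelWithTransformation(config).eval()
    input_ids = torch.randint(3, 100, (1, 8))
    return model(input_ids=input_ids).projection_state.shape  # (1, 8, 32)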
| 96 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
    def __init__( self , speech_model: WhisperForConditionalGeneration , speech_processor: WhisperProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ):
        """simple docstring"""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        """simple docstring"""
        self.enable_attention_slicing(None )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=1_60_00 , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
"""simple docstring"""
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="""pt""" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=48_00_00 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt )}''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps )}.''' )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !='''
                    f''' {type(prompt )}.''' )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""" )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding="""max_length""" , max_length=max_length , truncation=True , return_tensors="""pt""" , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
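# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Assembles the pipeline from public checkpoints; the model ids are the usual
# hub names, and `audio` is assumed to be a 1-D array of 16 kHz samples.
def _build_speech_to_image_pipeline():
    from diffusers import StableDiffusionPipeline
    sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    return SpeechToImagePipeline(
        speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
        speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
        vae=sd.vae, text_encoder=sd.text_encoder, tokenizer=sd.tokenizer,
        unet=sd.unet, scheduler=sd.scheduler,
        safety_checker=sd.safety_checker, feature_extractor=sd.feature_extractor,
    )
# images = _build_speech_to_image_pipeline()(audio, sampling_rate=16000).images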
| 26 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 7_7
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_attention_slicing_forward_pass( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_alt_diffusion_ddim( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['''text_encoder'''] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''prompt'''] = '''A photo of an astronaut'''
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.5_74_81_62, 0.60_44_71_45, 0.48_82_12_17, 0.50_10_06_36, 0.5_43_11_85, 0.45_76_36_83, 0.49_65_76_96, 0.48_13_27_33, 0.47_57_30_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_pndm( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['''text_encoder'''] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.51_60_50_93, 0.5_70_72_41, 0.47_36_55_07, 0.50_57_88_86, 0.5_63_38_77, 0.4_64_25_03, 0.5_18_20_81, 0.48_76_34_84, 0.49_08_42_37] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_alt_diffusion( self ):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.10_10, 0.08_00, 0.07_94, 0.08_85, 0.08_43, 0.07_62, 0.07_69, 0.07_29, 0.05_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_fast_ddim( self ):
        scheduler = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='''numpy''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.40_19, 0.40_52, 0.38_10, 0.41_19, 0.39_16, 0.39_82, 0.46_51, 0.41_95, 0.53_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 97 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 0 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
lowercase__ : Tuple = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp( TestCase ):
    """simple docstring"""
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        '''simple docstring'''
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    """simple docstring"""
    tmp_dir = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
    dataset_module = dataset_module_factory('''wikipedia''', cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name='''20220301.frr''', hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ):
    """simple docstring"""
    dataset_module = dataset_module_factory('''wikipedia''', cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path, dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name='''20220301.frr''', hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['''train'''], IterableDataset )
    assert next(iter(ds['''train'''] ) )
| 98 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def get_mobilenet_v1_config( model_name ):
    """simple docstring"""
    config = MobileNetV1Config(layer_norm_eps=0.0_01 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = """background"""
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    """simple docstring"""
    config = get_mobilenet_v1_config(model_name )
    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
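# Hedged usage note (illustrative addition): a typical invocation of this
# conversion script, with placeholder paths,
#   python convert_script.py --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf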
| 26 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
"""simple docstring"""
    deprecated_args = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
    def __init__( self , **kwargs ):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    f'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.torchscript = kwargs.pop("""torchscript""" , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop("""fp16_opt_level""" , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={"""help""": """Trace the models using torchscript"""} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
    fp16_opt_level: str = field(
        default="""O1""" , metadata={
            """help""": (
                """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
                """See details at https://nvidia.github.io/apex/amp.html"""
            )
        } , )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ["""torch"""] )
        logger.info("""PyTorch: setting up devices""" )
        if not self.cuda:
            device = torch.device("""cpu""" )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ):
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        requires_backends(self , ["""torch"""] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ):
        requires_backends(self , ["""torch"""] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ):
        return self.n_gpu > 0
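# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Constructing the arguments and reading the resolved device; the field names
# (models, batch_sizes, sequence_lengths) come from the BenchmarkArguments base
# class and torch must be installed for the device properties to resolve.
def _demo_benchmark_args():
    args = PyTorchBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
    )
    return args.device, args.n_gpu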
| 99 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        """simple docstring"""
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 26 | 0 |
import functools
def mincost_tickets( days , costs ) -> int:
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 3_6_6:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 3_6_5:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 3_0 ) , )
    return dynamic_programming(1 )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
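# --- Hedged usage sketch (illustrative addition, not part of the original file) ---
# Minimum cost to cover the given travel days with 1-, 7-, and 30-day passes.
if __name__ == "__main__":
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11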
| 100 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
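# Worked example (added for clarity, binary case): with TP, TN, FP, FN counts,
#   MCC = (TP*TN - FP*FN) / sqrt((TP+FP) * (TP+FN) * (TN+FP) * (TN+FN))
# e.g. references=[1, 0, 1, 0] vs predictions=[1, 1, 0, 0] gives TP=TN=FP=FN=1,
# so MCC = (1*1 - 1*1) / sqrt(2*2*2*2) = 0.0, i.e. no better than chance.
# assert matthews_corrcoef([1, 0, 1, 0], [1, 1, 0, 0]) == 0.0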
| 26 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25, ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
return out
| 101 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        """simple docstring"""
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """simple docstring"""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """simple docstring"""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
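# Minimal usage sketch (added for illustration; the URL is a placeholder and
# this helper is not part of the test suite). DownloadManager caches downloads
# under <cache_dir>/downloads and extractions under <cache_dir>/extracted,
# keyed by hash_url_to_filename(url).
def _demo_download_manager():
    dl_manager = DownloadManager(dataset_name="demo")
    local_path = dl_manager.download("https://example.com/data.txt")  # placeholder URL
    return local_path, dl_manager.downloaded_paths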
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """simple docstring"""

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        '''simple docstring'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        '''simple docstring'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        '''simple docstring'''
        return self.__str__()

    def derivative(self):
        '''simple docstring'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        '''simple docstring'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        '''simple docstring'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
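# Quick usage sketch (added for illustration): coefficients run from the
# constant term upward, so Polynomial(2, [1, 2, 3]) represents 3x^2 + 2x + 1.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])
    q = Polynomial(1, [4, 5])
    print(p + q)           # 3x^2 + 7x + 5
    print(p.derivative())  # 6x + 2
    print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17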
| 102 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost in this copy; "ConvNextImageProcessor"
    # is assumed from the ConvNeXT-style crop_pct / shortest_edge=384 logic below.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )

        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
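# Worked example of the resize rule above (added for clarity): with
# size = {"shortest_edge": 224} and crop_pct = 224/256 = 0.875, the image is
# first resized so its shortest edge is int(224 / 0.875) = 256 pixels, then
# center-cropped to 224 x 224. At shortest_edge >= 384 the image is simply
# warped to (shortest_edge, shortest_edge) with no crop.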
| 103 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """simple docstring"""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        """simple docstring"""
        s = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'''%{max_element_length}s'''

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        """simple docstring"""
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """simple docstring"""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        """simple docstring"""
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        """simple docstring"""
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        """simple docstring"""
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        """simple docstring"""
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        """simple docstring"""
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        """simple docstring"""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another)})'''
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        """simple docstring"""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """simple docstring"""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        """simple docstring"""
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'''u is {u}''' )
        print(F'''v is {v}''' )
        print(F'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}''' )

    def test2() -> None:
        """simple docstring"""
        import doctest

        doctest.testmod()

    test2()
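# Note on the math (added for clarity): sherman_morrison treats `self` as A^(-1)
# and returns (A + u v^T)^(-1) via the Sherman-Morrison formula:
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# which is why the method only needs matrix products and one scalar division.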
| 26 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """simple docstring"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """simple docstring"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else "" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """simple docstring"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device ), return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCamelCase ( UpperCAmelCase_ : "RagExampleArguments", UpperCAmelCase_ : "ProcessingArguments", UpperCAmelCase_ : "IndexHnswArguments", ) -> int:
"""simple docstring"""
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
A__ = load_dataset(
"csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
A__ = dataset.map(UpperCAmelCase_, batched=UpperCAmelCase_, num_proc=processing_args.num_proc )
# And compute the embeddings
A__ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCAmelCase_ )
A__ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
A__ = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
A__ = dataset.map(
partial(UpperCAmelCase_, ctx_encoder=UpperCAmelCase_, ctx_tokenizer=UpperCAmelCase_ ), batched=UpperCAmelCase_, batch_size=processing_args.batch_size, features=UpperCAmelCase_, )
# And finally save your dataset
A__ = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset" )
dataset.save_to_disk(UpperCAmelCase_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
A__ = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings", custom_index=UpperCAmelCase_ )
# And save the index
A__ = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(UpperCAmelCase_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    """simple docstring"""

    csv_path: str = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
    question: Optional[str] = field(
        default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )


@dataclass
class ProcessingArguments:
    """simple docstring"""

    num_proc: Optional[int] = field(
        default=None , metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } , )
    batch_size: int = field(
        default=16 , metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } , )


@dataclass
class IndexHnswArguments:
    """simple docstring"""

    d: int = field(
        default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
    m: int = field(
        default=128 , metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
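# Minimal follow-up sketch (added for illustration): once saved, the passages
# and index can be reloaded and queried with a DPR *question* encoder. The
# checkpoint name below is an example, not part of this script.
#
# from datasets import load_from_disk
# from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
# dataset = load_from_disk(passages_path)
# dataset.load_faiss_index("embeddings", index_path)
# q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
# scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=5)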
| 104 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
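# Shape of the generated index file (added for illustration; this is the
# standard sharded-checkpoint format, and the shard counts here are made up):
#
# {
#   "metadata": {"total_size": 123456789},
#   "weight_map": {
#     "ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#     "shared.weight": "pytorch_model-00129-of-00129.bin"
#   }
# }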
| 26 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """simple docstring"""
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def rename_key(name):
    """simple docstring"""
    if "encoder.model" in name:
        name = name.replace('encoder.model' , 'encoder' )
    if "decoder.model" in name:
        name = name.replace('decoder.model' , 'decoder' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if name.startswith('encoder' ):
        if "layers" in name:
            name = 'encoder.' + name
        if "attn.proj" in name:
            name = name.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in name and "mask" not in name:
            name = name.replace('attn' , 'attention.self' )
        if "norm1" in name:
            name = name.replace('norm1' , 'layernorm_before' )
        if "norm2" in name:
            name = name.replace('norm2' , 'layernorm_after' )
        if "mlp.fc1" in name:
            name = name.replace('mlp.fc1' , 'intermediate.dense' )
        if "mlp.fc2" in name:
            name = name.replace('mlp.fc2' , 'output.dense' )

        if name == "encoder.norm.weight":
            name = 'encoder.layernorm.weight'
        if name == "encoder.norm.bias":
            name = 'encoder.layernorm.bias'
    return name
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ : List[str] = orig_state_dict.pop(lowerCamelCase_ )
if "qkv" in key:
SCREAMING_SNAKE_CASE_ : Dict = key.split('.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(key_split[3] )
SCREAMING_SNAKE_CASE_ : Tuple = int(key_split[5] )
SCREAMING_SNAKE_CASE_ : List[Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE_ : str = val[:dim, :]
SCREAMING_SNAKE_CASE_ : Optional[int] = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val[:dim]
SCREAMING_SNAKE_CASE_ : Dict = val[dim : dim * 2]
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
SCREAMING_SNAKE_CASE_ : List[str] = val
return orig_state_dict
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = DonutModel.from_pretrained(lowerCamelCase_ ).eval()
# load HuggingFace model
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = get_configs(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = DonutSwinModel(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = MBartForCausalLM(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = VisionEncoderDecoderModel(encoder=lowerCamelCase_ , decoder=lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = original_model.state_dict()
SCREAMING_SNAKE_CASE_ : Any = convert_state_dict(lowerCamelCase_ , lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify results on scanned document
SCREAMING_SNAKE_CASE_ : Dict = load_dataset('hf-internal-testing/example-documents' )
SCREAMING_SNAKE_CASE_ : int = dataset['test'][0]['image'].convert('RGB' )
SCREAMING_SNAKE_CASE_ : List[Any] = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase_ , from_slow=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
SCREAMING_SNAKE_CASE_ : List[Any] = DonutProcessor(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = processor(lowerCamelCase_ , return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
SCREAMING_SNAKE_CASE_ : List[Any] = 'When is the coffee break?'
SCREAMING_SNAKE_CASE_ : str = task_prompt.replace('{user_input}' , lowerCamelCase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
SCREAMING_SNAKE_CASE_ : Any = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
SCREAMING_SNAKE_CASE_ : int = 's_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
SCREAMING_SNAKE_CASE_ : str = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'hello world'
else:
raise ValueError('Model name not supported' )
SCREAMING_SNAKE_CASE_ : Optional[int] = original_model.decoder.tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors='pt' )[
'input_ids'
]
SCREAMING_SNAKE_CASE_ : int = original_model.encoder.model.patch_embed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = model.encoder.embeddings(lowerCamelCase_ )
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )
# verify encoder hidden states
SCREAMING_SNAKE_CASE_ : Any = original_model.encoder(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model.encoder(lowerCamelCase_ ).last_hidden_state
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-2 )
# verify decoder hidden states
SCREAMING_SNAKE_CASE_ : Dict = original_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).logits
SCREAMING_SNAKE_CASE_ : int = model(lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ ).logits
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] , commit_message='Update model' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 105 |
'''simple docstring'''
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )

    def __str__(self) -> str:
        """simple docstring"""
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cva.Mat, list[list[int]]]:
        """simple docstring"""
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
| 26 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    '''simple docstring'''
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            stage_num, layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            # ASSUMPTION: the target key names were lost in this copy; they are
            # reconstructed from GroupViT's CLIP-style q/k/v projection layout.
            prefix = f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn'''
            if "weight" in key:
                orig_state_dict[f'''{prefix}.q_proj.weight'''] = val[:dim, :]
                orig_state_dict[f'''{prefix}.k_proj.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''{prefix}.v_proj.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''{prefix}.q_proj.bias'''] = val[:dim]
                orig_state_dict[f'''{prefix}.k_proj.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''{prefix}.v_proj.bias'''] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            # ASSUMPTION: same reconstruction as above, for the text encoder.
            prefix = f'''text_model.encoder.layers.{layer_num}.self_attn'''
            if "weight" in key:
                orig_state_dict[f'''{prefix}.q_proj.weight'''] = val[:dim, :]
                orig_state_dict[f'''{prefix}.k_proj.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''{prefix}.v_proj.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''{prefix}.q_proj.bias'''] = val[:dim]
                orig_state_dict[f'''{prefix}.k_proj.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''{prefix}.v_proj.bias'''] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    '''simple docstring'''
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()

    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )

    with torch.no_grad():
        outputs = model(**inputs )

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )

    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )

    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__snake_case :Any =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) | 106 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 26 | 0 |
'''simple docstring'''
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
_A = str(__snake_case )
return len(__snake_case ) == 9 and set(__snake_case ) == set('123456789' )
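# Concatenated products: for a 4-digit n in [5000, 9999], n followed by 2n equals 100002 * n;
# for a 3-digit n in [100, 333], n followed by 2n and 3n equals 1002003 * n.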
def _SCREAMING_SNAKE_CASE ( ):
for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
_A = 1_0_0_0_0_2 * base_num
if is_9_pandigital(__snake_case ):
return candidate
for base_num in range(3_3_3 , 9_9 , -1 ):
_A = 1_0_0_2_0_0_3 * base_num
if is_9_pandigital(__snake_case ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
| 107 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a: Any = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = '''encoder-decoder'''
_lowerCamelCase = True
def __init__( self : int , **lowerCamelCase : Any ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_UpperCAmelCase = kwargs.pop("""encoder""" )
_UpperCAmelCase = encoder_config.pop("""model_type""" )
_UpperCAmelCase = kwargs.pop("""decoder""" )
_UpperCAmelCase = decoder_config.pop("""model_type""" )
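        # import deferred into __init__, presumably to avoid a circular dependency with the auto configuration module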
from ..auto.configuration_auto import AutoConfig
_UpperCAmelCase = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
_UpperCAmelCase = AutoConfig.for_model(lowerCamelCase , **lowerCamelCase )
_UpperCAmelCase = True
@classmethod
def lowerCamelCase ( cls : Optional[Any] , lowerCamelCase : PretrainedConfig , lowerCamelCase : PretrainedConfig , **lowerCamelCase : int ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
_UpperCAmelCase = True
_UpperCAmelCase = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowerCamelCase )
def lowerCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.encoder.to_dict()
_UpperCAmelCase = self.decoder.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output | 108 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
a = logging.get_logger(__name__)
a = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __a :
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'Model type selected in the list: ' + ', '.join(_snake_case )} )
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
__UpperCamelCase : int = field(
default=128, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : int = field(
default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'}, )
__UpperCamelCase : int = field(
default=64, metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
}, )
__UpperCamelCase : int = field(
default=30, metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
__UpperCamelCase : float = field(
default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__UpperCamelCase : int = field(
default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
__UpperCamelCase : int = field(
default=0, metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
}, )
__UpperCamelCase : int = field(default=1, metadata={'help': 'multiple threads for converting example to features'} )
class __a ( _snake_case ):
__UpperCamelCase : Optional[Any] = 'train'
__UpperCamelCase : List[Any] = 'dev'
class __a ( _snake_case ):
__UpperCamelCase : SquadDataTrainingArguments
__UpperCamelCase : List[SquadFeatures]
__UpperCamelCase : Split
__UpperCamelCase : bool
def __init__( self : List[str] ,lowerCamelCase : SquadDataTrainingArguments ,lowerCamelCase : PreTrainedTokenizer ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Union[str, Split] = Split.train ,lowerCamelCase : Optional[bool] = False ,lowerCamelCase : Optional[str] = None ,lowerCamelCase : Optional[str] = "pt" ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = args
__SCREAMING_SNAKE_CASE = is_language_sensitive
        __SCREAMING_SNAKE_CASE = SquadVaProcessor()
if isinstance(lowerCamelCase ,lowerCamelCase ):
try:
__SCREAMING_SNAKE_CASE = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
__SCREAMING_SNAKE_CASE = mode
# Load data features from cache or dataset file
__SCREAMING_SNAKE_CASE = """v2""" if args.version_2_with_negative else """v1"""
__SCREAMING_SNAKE_CASE = os.path.join(
cache_dir if cache_dir is not None else args.data_dir ,f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__SCREAMING_SNAKE_CASE = cached_features_file + """.lock"""
with FileLock(lowerCamelCase ):
if os.path.exists(lowerCamelCase ) and not args.overwrite_cache:
__SCREAMING_SNAKE_CASE = time.time()
__SCREAMING_SNAKE_CASE = torch.load(lowerCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__SCREAMING_SNAKE_CASE = self.old_features["""features"""]
__SCREAMING_SNAKE_CASE = self.old_features.get("""dataset""" ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.old_features.get("""examples""" ,lowerCamelCase )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" ,time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
""" future run""" )
else:
if mode == Split.dev:
__SCREAMING_SNAKE_CASE = self.processor.get_dev_examples(args.data_dir )
else:
__SCREAMING_SNAKE_CASE = self.processor.get_train_examples(args.data_dir )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = squad_convert_examples_to_features(
examples=self.examples ,tokenizer=lowerCamelCase ,max_seq_length=args.max_seq_length ,doc_stride=args.doc_stride ,max_query_length=args.max_query_length ,is_training=mode == Split.train ,threads=args.threads ,return_dataset=lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} ,lowerCamelCase ,)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Optional[Any] ,lowerCamelCase : Optional[int] ):
'''simple docstring'''
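        # materialize a single cached SquadFeatures entry as a dict of tensors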
__SCREAMING_SNAKE_CASE = self.features[i]
__SCREAMING_SNAKE_CASE = torch.tensor(feature.input_ids ,dtype=torch.long )
__SCREAMING_SNAKE_CASE = torch.tensor(feature.attention_mask ,dtype=torch.long )
__SCREAMING_SNAKE_CASE = torch.tensor(feature.token_type_ids ,dtype=torch.long )
__SCREAMING_SNAKE_CASE = torch.tensor(feature.cls_index ,dtype=torch.long )
__SCREAMING_SNAKE_CASE = torch.tensor(feature.p_mask ,dtype=torch.float )
__SCREAMING_SNAKE_CASE = torch.tensor(feature.is_impossible ,dtype=torch.float )
__SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape ,dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__SCREAMING_SNAKE_CASE = torch.tensor(feature.start_position ,dtype=torch.long )
__SCREAMING_SNAKE_CASE = torch.tensor(feature.end_position ,dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 109 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class a ( lowercase ):
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 110 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE_ = """bart"""
SCREAMING_SNAKE_CASE_ = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
if LOAD_DENSE_INDEX:
a_ : int = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
a_ : Tuple = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
a_ : List[Any] = qar_model.eval()
else:
a_ : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
a_ : List[str] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
a_ : Any = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
a_ : int = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
a_ : int = sas_model.eval()
else:
a_ : Dict = make_qa_sas_model(
model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def lowerCAmelCase_ ( ) -> Tuple:
if LOAD_DENSE_INDEX:
a_ : Tuple = faiss.StandardGpuResources()
a_ : Optional[Any] = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0" )["""train"""]
a_ : str = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wikiaab_passages.num_rows, 128), )
a_ : Optional[int] = faiss.IndexFlatIP(128 )
a_ : Any = faiss.index_cpu_to_gpu(_lowerCamelCase, 1, _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
a_ : Tuple = (None, None)
a_ : List[str] = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def lowerCAmelCase_ ( ) -> List[Any]:
a_ : Tuple = datasets.load_dataset("eli5", name="LFQA_reddit" )
a_ : Dict = elia["""train_eli5"""]
a_ : int = np.memmap(
"eli5_questions_reps.dat", dtype="float32", mode="r", shape=(elia_train.num_rows, 128) )
a_ : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_indexes()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_models()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = load_train_data()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=10 ) -> int:
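    # embed the question and look up the closest ELI5 training examples in the dense index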
a_ : Optional[int] = embed_questions_for_retrieval([question], _lowerCamelCase, _lowerCamelCase )
a_ : Tuple = eli5_train_q_index.search(_lowerCamelCase, _lowerCamelCase )
a_ : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__="wiki40b", SCREAMING_SNAKE_CASE__="dense", SCREAMING_SNAKE_CASE__=10 ) -> Optional[Any]:
if source == "none":
a_ : Dict = (""" <P> """.join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
a_ : Dict = query_qa_dense_index(
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
else:
a_ : str = query_es_index(
_lowerCamelCase, _lowerCamelCase, index_name="english_wiki40b_snippets_100w", n_results=_lowerCamelCase, )
a_ : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
a_ : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase, _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda SCREAMING_SNAKE_CASE__ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda SCREAMING_SNAKE_CASE__ : None),
} )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=64, SCREAMING_SNAKE_CASE__=256, SCREAMING_SNAKE_CASE__=False, SCREAMING_SNAKE_CASE__=2, SCREAMING_SNAKE_CASE__=0.95, SCREAMING_SNAKE_CASE__=0.8 ) -> List[str]:
with torch.no_grad():
a_ : Union[str, Any] = qa_sas_generate(
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, num_answers=1, num_beams=_lowerCamelCase, min_len=_lowerCamelCase, max_len=_lowerCamelCase, do_sample=_lowerCamelCase, temp=_lowerCamelCase, top_p=_lowerCamelCase, top_k=_lowerCamelCase, max_input_length=1_024, device="cuda:0", )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
SCREAMING_SNAKE_CASE_ = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
SCREAMING_SNAKE_CASE_ = """\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE_ = """\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"""
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE_ = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox("""Demo options""")
if demo_options:
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE_ = action_list.index(action_st)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
SCREAMING_SNAKE_CASE_ = show_type == """Show full text of passages"""
else:
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
SCREAMING_SNAKE_CASE_ = """\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n """
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
SCREAMING_SNAKE_CASE_ = """wiki40b"""
SCREAMING_SNAKE_CASE_ = """dense"""
SCREAMING_SNAKE_CASE_ = """beam"""
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 64
SCREAMING_SNAKE_CASE_ = 2_56
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = st.sidebar.checkbox("""Generation options""")
if generate_options:
SCREAMING_SNAKE_CASE_ = """\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n """
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE_ = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE_ = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE_ = None
# start main text
SCREAMING_SNAKE_CASE_ = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
SCREAMING_SNAKE_CASE_ = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE_ = st.text_input("""Enter your question here:""", """""")
else:
SCREAMING_SNAKE_CASE_ = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method="""dense""", n_results=10)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
SCREAMING_SNAKE_CASE_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE_ = support_list[:10]
SCREAMING_SNAKE_CASE_ = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE_ = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
SCREAMING_SNAKE_CASE_ = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE_ = """[{}]({})""".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE_ = sec_titles.split(""" & """)
SCREAMING_SNAKE_CASE_ = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE_ = find_nearest_training(question)
SCREAMING_SNAKE_CASE_ = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
SCREAMING_SNAKE_CASE_ = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
SCREAMING_SNAKE_CASE_ = """\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True) | 237 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self._create_example_records()
__snake_case : str = Dataset.from_list(__magic_name__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__magic_name__ ):
self.assertDictEqual(__magic_name__ , example_records[i] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self._create_example_records()
__snake_case : Dict = Dataset.from_list(__magic_name__ )
__snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : str ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
__snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowercase__ ( self : List[str] ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
__snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__snake_case : int = Dataset.from_list(__magic_name__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = Dataset.from_list([] )
self.assertEqual(len(__magic_name__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 26 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
UpperCamelCase__ = 5
UpperCamelCase__ = 1_0
@require_sentencepiece
@require_tokenizers
class a__ ( __lowercase , unittest.TestCase ):
snake_case__ = SpeechaTextTokenizer
snake_case__ = False
snake_case__ = True
def __UpperCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
super().setUp()
_lowerCAmelCase:Tuple = sp.SentencePieceProcessor()
spm_model.Load(a__)
_lowerCAmelCase:int = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_) for id_ in range(len(a__))]
_lowerCAmelCase:int = dict(zip(a__ ,range(len(a__))))
_lowerCAmelCase:Union[str, Any] = Path(self.tmpdirname)
save_json(a__ ,save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(a__ ,save_dir / VOCAB_FILES_NAMES['''spm_file'''])
_lowerCAmelCase:Dict = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self : Dict) -> int:
"""simple docstring"""
_lowerCAmelCase:Any = """<pad>"""
_lowerCAmelCase:Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__) ,a__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__) ,a__)
def __UpperCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:List[Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'''<s>''')
self.assertEqual(vocab_keys[1] ,'''<pad>''')
self.assertEqual(vocab_keys[-1] ,'''j''')
self.assertEqual(len(a__) ,1001)
def __UpperCamelCase ( self : List[str]) -> int:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size ,1001)
def __UpperCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
_lowerCAmelCase:List[Any] = tokenizer.tokenize('''This is a test''')
self.assertListEqual(a__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__) ,[289, 50, 14, 174, 386] ,)
_lowerCAmelCase:Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
a__ ,[SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] ,)
_lowerCAmelCase:Union[str, Any] = tokenizer.convert_tokens_to_ids(a__)
self.assertListEqual(a__ ,[12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
_lowerCAmelCase:List[Any] = tokenizer.convert_ids_to_tokens(a__)
self.assertListEqual(
a__ ,[SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] ,)
@slow
def __UpperCamelCase ( self : int) -> Tuple:
"""simple docstring"""
_lowerCAmelCase:List[Any] = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ ,model_name='''facebook/s2t-small-mustc-en-de-st''' ,revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' ,)
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest( unittest.TestCase ):
    checkpoint_name = '''valhalla/s2t_mustc_multilinguial_medium'''
    french_text = '''C\'est trop cool'''
    spanish_text = '''Esto es genial'''
@classmethod
    def setUpClass(cls) -> None:
        """simple docstring"""
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
    def test_language_codes(self) -> None:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] ,4)
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] ,6)
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] ,9)
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] ,11)
    def test_vocab_size(self) -> None:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size ,1_0000)
    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        """simple docstring"""
        self.assertIn(ES_CODE ,self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids ,skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=True)
        self.assertEqual(result ,expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token ,result)
    def test_tokenizer_adds_special_tokens(self) -> None:
        """simple docstring"""
        self.tokenizer.tgt_lang = """fr"""
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0] ,FR_CODE)
        self.assertEqual(encoded[-1] ,self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self) -> None:
        """simple docstring"""
        self.tokenizer.tgt_lang = """fr"""
        self.assertListEqual(self.tokenizer.prefix_tokens ,[FR_CODE])
        self.tokenizer.tgt_lang = """es"""
        self.assertListEqual(self.tokenizer.prefix_tokens ,[ES_CODE])
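if __name__ == "__main__":
    # A minimal usage sketch, not part of the original test suite (it assumes
    # Hub access to the checkpoint named above): setting `tgt_lang` swaps the
    # language code that `prefix_tokens` prepends to every encoded sequence.
    tokenizer = SpeechaTextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
    tokenizer.tgt_lang = "fr"
    ids = tokenizer("C'est trop cool").input_ids
    assert ids[0] == tokenizer.lang_code_to_id["fr"]  # language code comes first
    assert ids[-1] == tokenizer.eos_token_id          # EOS comes last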
| 227 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest( nn.Module ):
    def __init__( self ) -> None:
        """simple docstring"""
        super().__init__()
        self.lineara = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linearb = nn.Linear(4 , 5 )
    def forward( self , x ) -> torch.Tensor:
        """simple docstring"""
        return self.linearb(self.batchnorm(self.lineara(x ) ) )
class PreForwardHook( ModelHook ):
    def pre_forward( self , module , *args , **kwargs ) -> tuple:
        """simple docstring"""
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook( ModelHook ):
    def post_forward( self , module , output ) -> torch.Tensor:
        """simple docstring"""
        return output + 1
class HooksModelTester( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
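if __name__ == "__main__":
    # A minimal sketch of the hook mechanism exercised above (assuming only
    # that `accelerate` and `torch` are installed): a ModelHook can rewrite a
    # module's inputs before its original forward runs, without touching the
    # module's code.
    class ScaleInputHook(ModelHook):
        def pre_forward(self, module, *args, **kwargs):
            return (args[0] * 2,) + args[1:], kwargs  # double the first input

    layer = nn.Linear(3, 3)
    x = torch.randn(2, 3)
    expected = layer(x * 2)
    add_hook_to_module(layer, ScaleInputHook())
    assert torch.allclose(layer(x), expected, atol=1e-5)
    remove_hook_from_module(layer)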
| 26 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : List[str] = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Dict = {
"""roberta-base""": 5_12,
"""roberta-large""": 5_12,
"""roberta-large-mnli""": 5_12,
"""distilroberta-base""": 5_12,
"""roberta-base-openai-detector""": 5_12,
"""roberta-large-openai-detector""": 5_12,
}
class RobertaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
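if __name__ == "__main__":
    # A short usage sketch (assuming Hub access to `roberta-base`): single
    # inputs become `<s> ... </s>`, and pairs are joined with a doubled
    # separator, matching build_inputs_with_special_tokens above.
    tok = RobertaTokenizerFast.from_pretrained("roberta-base")
    single = tok("hello world")["input_ids"]
    assert single[0] == tok.bos_token_id and single[-1] == tok.eos_token_id
    pair = tok("first", "second")["input_ids"]
    assert pair.count(tok.sep_token_id) == 3  # </s></s> between the pair plus the final </s>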
| 79 |
'''simple docstring'''
from __future__ import annotations
__UpperCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( grid , init , goal , cost , heuristic , ) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("""Algorithm is unable to find solution""" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
__UpperCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCamelCase = [0, 0]
# all coordinates are given in format [y,x]
__UpperCamelCase = [len(grid) - 1, len(grid[0]) - 1]
__UpperCamelCase = 1
# the cost map which pushes the path closer to the goal
__UpperCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCamelCase = 99
__UpperCamelCase , __UpperCamelCase = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
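    # A tiny extra sketch: with an all-zero heuristic, f == g and the search
    # degrades gracefully to a uniform-cost (Dijkstra-style) search, yet it
    # still returns a valid shortest path around the obstacle.
    small_grid = [[0, 0, 0], [0, 1, 0]]
    zero_heuristic = [[0, 0, 0], [0, 0, 0]]
    small_path, _ = search(small_grid, [0, 0], [1, 2], 1, zero_heuristic)
    print(small_path)  # [[0, 0], [0, 1], [0, 2], [1, 2]]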
| 26 | 0 |
from __future__ import annotations
class Matrix :
'''simple docstring'''
    def __init__( self , rows : list[list[int]] ) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float." )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
    def num_rows( self ) -> int:
return len(self.rows )
@property
    def num_columns( self ) -> int:
return len(self.rows[0] )
@property
    def order( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
    def is_square( self ) -> bool:
return self.order[0] == self.order[1]
    def identity( self ) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(values )
    def determinant( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable( self ) -> bool:
return bool(self.determinant() )
    def get_minor( self , row : int , column : int ) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(values ).determinant()
    def get_cofactor( self , row : int , column : int ) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row , column )
        return -1 * self.get_minor(row , column )
    def minors( self ) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse" )
        return self.adjugate() * (1 / determinant)
def __repr__( self : Optional[int] ) -> str:
return str(self.rows )
def __str__( self : Optional[Any] ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(__magic_name__ ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row( self , row : list[int] , position : int | None = None ) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats" )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column : list[int] , position : int | None = None ) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats" )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other : object ) -> bool:
        if not isinstance(other , Matrix ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__( self , other : object ) -> bool:
        return not self == other
def __neg__( self : Dict ) -> Matrix:
return self * -1
    def __add__( self , other : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self , other : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other : Matrix | int | float ) -> Matrix:
        if isinstance(other , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
                    [Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
    def __pow__( self , other : int ) -> Matrix:
        if not isinstance(other , int ):
            raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row : list[int] , column : list[int] ) -> int:
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
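    # A short usage sketch of the class above: an integer matrix with
    # determinant 1 has an integer inverse, so the round trip below is exact
    # even though Matrix.__mul__ truncates entries to int.
    matrix = Matrix([[2, 1], [1, 1]])
    assert matrix.determinant() == 1
    assert matrix.inverse() * matrix == matrix.identity()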
| 488 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
    if not isinstance(_lowerCamelCase , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_string = str(abs(_lowerCamelCase ) )
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence ,[] ,0 ,[0 for i in range(len(sequence ) )] )
def create_state_space_tree(sequence: list[int | str] ,current_sequence: list[int | str] ,index: int ,index_used: list[int] ,) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence ,current_sequence ,index + 1 ,index_used )
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a: list[int | str] = ['''A''', '''B''', '''C''']
generate_all_permutations(sequence_a)
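# Sanity-check sketch: the backtracking above enumerates the same n! orderings
# that itertools.permutations produces, just printed instead of collected.
from itertools import permutations
assert len(list(permutations([3, 1, 2, 4]))) == 24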
| 17 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax( depth: int , node_index: int , is_max: bool , scores: list[int] , height: float ) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""" )
    if not scores:
        raise ValueError("""Scores cannot be empty""" )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
    )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
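    # A hand-checkable sketch on four leaves (height = 2): the maximizing root
    # sees min(3, 5) = 3 and min(2, 9) = 2 from the minimizing layer below,
    # so the optimal value is 3.
    assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3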
| 26 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
_A : Tuple = """base_with_context"""
def load_notes_encoder( weights , model ):
lowercase : str = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowercase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase : Optional[Any] = weights[f"""layers_{lyr_num}"""]
lowercase : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowercase : str = ly_weight["""attention"""]
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase : List[str] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowercase : str = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase : Union[str, Any] = weights[f"""layers_{lyr_num}"""]
lowercase : List[Any] = ly_weight["""attention"""]
lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
lowercase : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowercase : Any = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_lowerCamelCase )
lowercase : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowercase : List[Any] = weights[f"""layers_{lyr_num}"""]
lowercase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowercase : List[Any] = ly_weight["""self_attention"""]
lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase : str = ly_weight["""MultiHeadDotProductAttention_0"""]
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase : int = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowercase : int = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        """from __gin__ import dynamic_registration""",
        """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""",
        """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""",
        """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""",
    ]
    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_A : Tuple = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
_A : str = parser.parse_args()
main(args)
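    # Hedged sketch of the kernel-transpose convention used by the loaders
    # above: a Flax/T5X Dense kernel is stored as (in_features, out_features),
    # while torch.nn.Linear stores (out_features, in_features), hence the `.T`.
    kernel = onp.random.randn(16, 8).astype("float32")
    demo_linear = nn.Linear(16, 8, bias=False)
    demo_linear.weight = nn.Parameter(torch.from_numpy(kernel.T.copy()))
    x = onp.random.randn(4, 16).astype("float32")
    onp.testing.assert_allclose(
        x @ kernel, demo_linear(torch.from_numpy(x)).detach().numpy(), rtol=1e-4, atol=1e-5
    )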
| 361 |
'''simple docstring'''
from __future__ import annotations
def slowsort( sequence , start = None , end = None ) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
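    # A tiny usage sketch: slowsort sorts in place, like list.sort(), but in
    # deliberately pessimal ("multiply and surrender") time, so keep inputs small.
    data = [5, 2, 4, 1, 3]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5]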
| 26 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open( *args , **kwargs ) -> None:
            """simple docstring"""
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests( unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        examples = [
{
"""image""": Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
    def run_pipeline_test( self , vqa_pipeline , examples ):
        """simple docstring"""
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{'score': ANY(float ), 'answer': ANY(str )}],
                [{'score': ANY(float ), 'answer': ANY(str )}],
            ] , )
@require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""
        outputs = vqa_pipeline(image=image , question='How many cats are there?' , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
@slow
@require_torch
    def test_large_model_pt( self ):
        """simple docstring"""
        vqa_pipeline = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
        image = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
        question = """How many cats are there?"""
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
    def test_small_model_tf( self ):
        """simple docstring"""
        pass
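if __name__ == "__main__":
    # A minimal usage sketch (assuming Hub access; it downloads the same
    # public checkpoint the slow test uses). The pipeline accepts a path,
    # a URL, or a PIL image.
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    demo_image = Image.new("RGB", (224, 224), color="white")  # stand-in for a real photo
    print(vqa(image=demo_image, question="What color is the image?", top_k=1))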
| 70 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples( unittest.TestCase ):
    def analyze_directory( self , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ) -> None:
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'''{module_identifier} is not a module.''' )
            else:
                result = doctest.testfile(str(Path("""..""" ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def test_tokenization_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def test_configuration_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def test_remaining_files_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def test_documentation_doctest( self ) -> None:
        """simple docstring"""
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
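if __name__ == "__main__":
    # A self-contained sketch of the same mechanism on a toy module (no
    # repository checkout needed): DocTestSuite turns docstring examples
    # into unittest test cases.
    import types

    toy = types.ModuleType("toy")
    exec(
        'def add(a, b):\n'
        '    """\n'
        '    >>> add(1, 2)\n'
        '    3\n'
        '    """\n'
        '    return a + b\n',
        toy.__dict__,
    )
    suite = doctest.DocTestSuite(toy)
    result = unittest.TextTestRunner(verbosity=0).run(suite)
    assert len(result.failures) == 0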
| 26 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin :
    '''simple docstring'''
    feature_extraction_class = None
    def test_feat_extract_to_json_string( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def test_feat_extract_to_json_file( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_feat_extract_from_and_save_pretrained( self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_init_without_params( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
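if __name__ == "__main__":
    # A concrete instantiation sketch (assuming `transformers` provides
    # Wav2Vec2FeatureExtractor, which inherits the JSON round trip tested
    # above from FeatureExtractionMixin):
    from transformers import Wav2Vec2FeatureExtractor

    extractor = Wav2Vec2FeatureExtractor(sampling_rate=16_000)
    restored = Wav2Vec2FeatureExtractor.from_dict(json.loads(extractor.to_json_string()))
    assert restored.to_dict() == extractor.to_dict()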
| 512 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
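# --- Illustrative usage sketch (not part of the original pipeline file). ---
# The class above chains two stages: Whisper transcribes the audio, and the
# transcription becomes the Stable Diffusion prompt. The standalone sketch below
# reproduces that flow with stock pipelines; the checkpoint names and the dummy
# dataset are assumptions chosen for demonstration.
def _example_speech_to_image():
    import torch
    from datasets import load_dataset
    from diffusers import StableDiffusionPipeline
    from transformers import pipeline as hf_pipeline

    sample = load_dataset(
        "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
    )[0]["audio"]

    # Stage 1: speech -> text.
    asr = hf_pipeline("automatic-speech-recognition", model="openai/whisper-small")
    prompt = asr({"raw": sample["array"], "sampling_rate": sample["sampling_rate"]})["text"]

    # Stage 2: text -> image.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(device)
    sd(prompt).images[0].save("speech_to_image.png")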
| 26 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
A = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class lowercase__ ( __lowercase ):
def __init__( self : Tuple , _lowercase : Dict , _lowercase : Tuple , _lowercase : Tuple=None , _lowercase : Optional[Any]=1 ):
"""simple docstring"""
UpperCAmelCase__ = tokenizer
UpperCAmelCase__ = dataset
UpperCAmelCase__ = len(_lowercase ) if n_tasks is None else n_tasks
UpperCAmelCase__ = n_copies
def __iter__( self : int ):
"""simple docstring"""
UpperCAmelCase__ = []
for task in range(self.n_tasks ):
            # without strip(), the model generates commented-out code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
UpperCAmelCase__ = self.tokenizer(_lowercase , padding=_lowercase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowercase__ ( __lowercase ):
def __init__( self : Any , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : str ):
"""simple docstring"""
UpperCAmelCase__ = start_length
UpperCAmelCase__ = eof_strings
UpperCAmelCase__ = tokenizer
def __call__( self : str , _lowercase : str , _lowercase : Union[str, Any] , **_lowercase : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_lowercase )
def __UpperCAmelCase ( __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = re.split("(%s)" % "|".join(_lowerCamelCase ) , _lowerCamelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __UpperCAmelCase ( __A , __A , __A , __A , __A , __A=2_0 , **__A ) -> int:
'''simple docstring'''
UpperCAmelCase__ = defaultdict(_lowerCamelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowerCamelCase ) ):
with torch.no_grad():
UpperCAmelCase__ = batch["""ids"""].shape[-1]
UpperCAmelCase__ = accelerator.unwrap_model(_lowerCamelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=_lowerCamelCase , **_lowerCamelCase )
# each task is generated batch_size times
UpperCAmelCase__ = batch["""task_id"""].repeat(_lowerCamelCase )
UpperCAmelCase__ = accelerator.pad_across_processes(
_lowerCamelCase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase__ = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase__ = generated_tokens.cpu().numpy()
UpperCAmelCase__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowerCamelCase , _lowerCamelCase ):
gen_token_dict[task].append(_lowerCamelCase )
UpperCAmelCase__ = [[] for _ in range(_lowerCamelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase__ = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
code_gens[task].append(remove_last_block(_lowerCamelCase ) )
return code_gens
def __UpperCAmelCase ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = HfArgumentParser(_lowerCamelCase )
UpperCAmelCase__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCAmelCase__ = """false"""
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCAmelCase__ = Accelerator()
set_seed(args.seed , device_specific=_lowerCamelCase )
# Load model and tokenizer
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase__ = tokenizer.eos_token
UpperCAmelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase__ = {
"""do_sample""": args.do_sample,
"""temperature""": args.temperature,
"""max_new_tokens""": args.max_new_tokens,
"""top_p""": args.top_p,
"""top_k""": args.top_k,
"""stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowerCamelCase , _lowerCamelCase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase__ = load_dataset("openai_humaneval" )
UpperCAmelCase__ = load_metric("code_eval" )
UpperCAmelCase__ = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
UpperCAmelCase__ = args.n_samples // args.batch_size
UpperCAmelCase__ = TokenizedDataset(_lowerCamelCase , human_eval["test"] , n_copies=_lowerCamelCase , n_tasks=_lowerCamelCase )
    # do not confuse args.batch_size with the dataloader batch size: it is passed to generate as num_return_sequences
UpperCAmelCase__ = DataLoader(_lowerCamelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase__ = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
UpperCAmelCase__ = accelerator.prepare(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ = complete_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , n_tasks=_lowerCamelCase , batch_size=args.batch_size , **_lowerCamelCase , )
if accelerator.is_main_process:
UpperCAmelCase__ = []
for task in tqdm(range(_lowerCamelCase ) ):
UpperCAmelCase__ = human_eval["""test"""][task]["""test"""]
UpperCAmelCase__ = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase__ = code_eval_metric.compute(
references=_lowerCamelCase , predictions=_lowerCamelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
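# Illustrative invocation (flag names mirror the HumanEvalArguments fields read in
# main(); the checkpoint name is an assumption):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot-small \
#       --do_sample True --temperature 0.2 --top_p 0.95 \
#       --n_samples 200 --batch_size 10 \
#       --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json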
| 475 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCamelCase( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
__snake_case = nn.Linear(3 , 4 )
__snake_case = nn.BatchNormad(4 )
__snake_case = nn.Linear(4 , 5 )
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
'''simple docstring'''
return self.lineara(self.batchnorm(self.lineara(SCREAMING_SNAKE_CASE ) ) )
class UpperCamelCase( __lowercase ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
'''simple docstring'''
return (args[0] + 1,) + args[1:], kwargs
class UpperCamelCase( __lowercase ):
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return output + 1
class UpperCamelCase( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Any:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.assertEqual(test_model._hf_hook , SCREAMING_SNAKE_CASE )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE , "_old_forward" ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = ModelHook()
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , append=SCREAMING_SNAKE_CASE )
self.assertEqual(isinstance(test_model._hf_hook , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(SCREAMING_SNAKE_CASE )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" ) )
self.assertFalse(hasattr(SCREAMING_SNAKE_CASE , "_old_forward" ) )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = torch.randn(2 , 3 )
__snake_case = test_model(x + 1 )
__snake_case = test_model(x + 2 )
__snake_case = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case = PreForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = torch.randn(2 , 3 )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
__snake_case = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
__snake_case = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
assert torch.allclose(SCREAMING_SNAKE_CASE , output + 2 , atol=1e-5 )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> int:
'''simple docstring'''
__snake_case = ModelForTest()
__snake_case = torch.randn(2 , 3 )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
__snake_case = PostForwardHook()
add_hook_to_module(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = test_model(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case = True
__snake_case = test_model(SCREAMING_SNAKE_CASE )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case = torch.randn(2 , 3 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(SCREAMING_SNAKE_CASE , AlignDevicesHook(io_same_device=SCREAMING_SNAKE_CASE ) )
__snake_case = torch.randn(2 , 3 ).to(0 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , torch.device(0 ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
__snake_case = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
__snake_case = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**SCREAMING_SNAKE_CASE ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**SCREAMING_SNAKE_CASE ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> str:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
__snake_case = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(SCREAMING_SNAKE_CASE , execution_device=SCREAMING_SNAKE_CASE , offload=SCREAMING_SNAKE_CASE )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case = torch.device(SCREAMING_SNAKE_CASE )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(SCREAMING_SNAKE_CASE , execution_device=SCREAMING_SNAKE_CASE , offload=SCREAMING_SNAKE_CASE , offload_buffers=SCREAMING_SNAKE_CASE )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
__snake_case = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
SCREAMING_SNAKE_CASE , execution_device=SCREAMING_SNAKE_CASE , offload=SCREAMING_SNAKE_CASE , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case = torch.device(SCREAMING_SNAKE_CASE )
self.assertEqual(model.batchnorm.running_mean.device , SCREAMING_SNAKE_CASE )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
SCREAMING_SNAKE_CASE , execution_device=SCREAMING_SNAKE_CASE , offload=SCREAMING_SNAKE_CASE , weights_map=model.state_dict() , offload_buffers=SCREAMING_SNAKE_CASE , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
__snake_case = torch.randn(2 , 3 )
__snake_case = model(SCREAMING_SNAKE_CASE )
self.assertEqual(output.device , SCREAMING_SNAKE_CASE )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(SCREAMING_SNAKE_CASE )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
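# --- Illustrative sketch (not part of the original test file). ---
# A standalone version of the mechanism exercised above: ModelHook.pre_forward
# receives the module plus the forward arguments and may rewrite them, exactly
# as the pre-forward hook used in these tests does.
def _example_hook_usage():
    import torch
    import torch.nn as nn
    from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

    class AddOneToInput(ModelHook):
        # Shift the first positional argument by one before forward runs.
        def pre_forward(self, module, *args, **kwargs):
            return (args[0] + 1,) + args[1:], kwargs

    layer = nn.Linear(3, 3)
    x = torch.randn(2, 3)
    add_hook_to_module(layer, AddOneToInput())
    with_hook = layer(x)  # actually computes layer(x + 1)
    remove_hook_from_module(layer)
    assert torch.allclose(with_hook, layer(x + 1), atol=1e-5)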
| 371 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
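# Example invocation (the script filename and paths are placeholders; the flags
# match the parser defined above):
#
#   python convert_mobilenet_v1.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf \
#       --push_to_hub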
| 26 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase_: Optional[int] = logging.get_logger(__name__)
class lowercase__ (enum.Enum ):
"""simple docstring"""
__UpperCamelCase : List[str] = 0
__UpperCamelCase : List[Any] = 1
@add_end_docstrings(__lowercase )
class lowercase__ (__lowercase ):
"""simple docstring"""
__UpperCamelCase : str = '''generated'''
def __init__( self : Any , *__a : Optional[int] , **__a : Any ):
super().__init__(*__a , **__a )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def lowercase ( self : List[str] , __a : List[str]=None , __a : Tuple=None , __a : Tuple=None , __a : Any=None , __a : int=None , __a : Any=None , **__a : str , ):
snake_case__ : Any = {}
if truncation is not None:
snake_case__ : List[str] = truncation
snake_case__ : Optional[Any] = generate_kwargs
snake_case__ : Dict = {}
if return_tensors is not None and return_type is None:
snake_case__ : int = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case__ : Any = return_type
if clean_up_tokenization_spaces is not None:
snake_case__ : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case__ : Tuple = self.tokenizer.encode(__a , add_special_tokens=__a )
if len(__a ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case__ : Optional[int] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase ( self : List[Any] , __a : int , __a : int , __a : int ):
return True
def lowercase ( self : List[Any] , *__a : Union[str, Any] , __a : Optional[int] ):
snake_case__ : List[str] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __a ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case__ : List[str] = ([prefix + arg for arg in args[0]],)
snake_case__ : Dict = True
elif isinstance(args[0] , __a ):
snake_case__ : List[str] = (prefix + args[0],)
snake_case__ : Union[str, Any] = False
else:
raise ValueError(
f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' )
snake_case__ : Dict = self.tokenizer(*__a , padding=__a , truncation=__a , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid argument for generate
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Any , *__a : Optional[int] , **__a : Dict ):
snake_case__ : List[Any] = super().__call__(*__a , **__a )
if (
isinstance(args[0] , __a )
and all(isinstance(__a , __a ) for el in args[0] )
and all(len(__a ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def lowercase ( self : Optional[int] , __a : Optional[Any] , __a : str=TruncationStrategy.DO_NOT_TRUNCATE , **__a : str ):
snake_case__ : Dict = self._parse_and_tokenize(__a , truncation=__a , **__a )
return inputs
def lowercase ( self : Optional[Any] , __a : Any , **__a : List[str] ):
if self.framework == "pt":
snake_case__ : str = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case__ : Optional[int] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case__ : List[str] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case__ : Optional[Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__a , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case__ : Union[str, Any] = self.model.generate(**__a , **__a )
snake_case__ : Optional[int] = output_ids.shape[0]
if self.framework == "pt":
snake_case__ : Union[str, Any] = output_ids.reshape(__a , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case__ : Tuple = tf.reshape(__a , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def lowercase ( self : str , __a : Union[str, Any] , __a : Any=ReturnType.TEXT , __a : Dict=False ):
snake_case__ : Union[str, Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case__ : Optional[int] = {f'{self.return_name}_token_ids': output_ids}
elif return_type == ReturnType.TEXT:
snake_case__ : Optional[int] = {
f'{self.return_name}_text': self.tokenizer.decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a , )
}
records.append(__a )
return records
@add_end_docstrings(__lowercase )
class lowercase__ (__lowercase ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''summary'''
def __call__( self : Tuple , *__a : Any , **__a : str ):
return super().__call__(*__a , **__a )
def lowercase ( self : int , __a : int , __a : int , __a : int ):
if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be smaller than your max_length={max_length}.' )
if input_length < max_length:
logger.warning(
f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(__lowercase )
class lowercase__ (__lowercase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = '''translation'''
def lowercase ( self : Union[str, Any] , __a : int , __a : int , __a : int ):
if input_length > 0.9 * max_length:
logger.warning(
f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def lowercase ( self : Optional[Any] , *__a : Dict , __a : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , __a : Any=None , __a : List[Any]=None ):
if getattr(self.tokenizer , """_build_translation_inputs""" , __a ):
return self.tokenizer._build_translation_inputs(
*__a , return_tensors=self.framework , truncation=__a , src_lang=__a , tgt_lang=__a )
else:
return super()._parse_and_tokenize(*__a , truncation=__a )
def lowercase ( self : List[Any] , __a : Any=None , __a : Union[str, Any]=None , **__a : Union[str, Any] ):
snake_case__ : List[Any] = super()._sanitize_parameters(**__a )
if src_lang is not None:
snake_case__ : int = src_lang
if tgt_lang is not None:
snake_case__ : Any = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; passing the languages as direct arguments is preferred.
snake_case__ : Tuple = kwargs.get("""task""" , self.task )
snake_case__ : Union[str, Any] = task.split("""_""" )
if task and len(__a ) == 4:
# translation, XX, to YY
snake_case__ : List[Any] = items[1]
snake_case__ : str = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , *__a : int , **__a : Union[str, Any] ):
return super().__call__(*__a , **__a )
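# --- Illustrative usage sketch (not part of the original file). ---
# The three pipelines above are normally reached through the `pipeline` factory.
# The checkpoints below are assumptions, but the output keys ("summary_text",
# "translation_text") follow from the `return_name` attributes defined above.
def _example_text2text_pipelines():
    from transformers import pipeline

    summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
    print(summarizer("Long input article ...", max_length=60, min_length=10)[0]["summary_text"])

    translator = pipeline("translation_en_to_fr", model="t5-small")
    print(translator("How old are you?")[0]["translation_text"])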
| 648 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 0 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class snake_case_ ( nn.Module ):
def __init__( self , a_ = 1_6 , a_ = 8_8 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = 3_2 , a_ = None , a_ = False , a_ = None , a_ = None , a_ = "geglu" , a_ = None , ):
super().__init__()
a_ : List[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=a_ , attention_head_dim=a_ , in_channels=a_ , num_layers=a_ , dropout=a_ , norm_num_groups=a_ , cross_attention_dim=a_ , attention_bias=a_ , sample_size=a_ , num_vector_embeds=a_ , activation_fn=a_ , num_embeds_ada_norm=a_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
a_ : List[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
a_ : Any = [7_7, 2_5_7]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
a_ : int = [1, 0]
def snake_case_ ( self , a_ , a_ , a_=None , a_=None , a_=None , a_ = True , ):
a_ : Optional[int] = hidden_states
a_ : int = []
a_ : int = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
a_ : Tuple = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
a_ : Tuple = self.transformer_index_for_condition[i]
a_ : Optional[Any] = self.transformers[transformer_index](
a_ , encoder_hidden_states=a_ , timestep=a_ , cross_attention_kwargs=a_ , return_dict=a_ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
a_ : List[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
a_ : List[str] = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=a_ )
| 237 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        """simple docstring"""
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
}
| 26 | 0 |
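# The metric above is a thin wrapper over scikit-learn, so the docstring
# examples can be reproduced without the `datasets` wrapper; a minimal
# sketch (inputs copied from the examples in the description string):
from sklearn.metrics import matthews_corrcoef as _mcc

_refs = [1, 3, 2, 0, 3, 2]
_preds = [1, 2, 2, 0, 3, 3]
print(round(_mcc(_refs, _preds), 2))  # 0.54, matching Example 1
print(round(_mcc(_refs, _preds, sample_weight=[0.5, 3, 1, 1, 1, 2]), 2))  # 0.1, matching Example 2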
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( __lowercase , unittest.TestCase ):
snake_case__ = None
snake_case__ = BloomTokenizerFast
snake_case__ = BloomTokenizerFast
snake_case__ = True
snake_case__ = False
snake_case__ = '''tokenizer_file'''
snake_case__ = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def __UpperCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
super().setUp()
_lowerCAmelCase:List[Any] = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''')
tokenizer.save_pretrained(self.tmpdirname)
def __UpperCamelCase ( self : Tuple ,**a__ : List[Any]) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname ,**a__)
def __UpperCamelCase ( self : Tuple) -> Any:
"""simple docstring"""
_lowerCAmelCase:Tuple = self.get_rust_tokenizer()
_lowerCAmelCase:Optional[int] = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
_lowerCAmelCase:Any = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
_lowerCAmelCase:Optional[int] = tokenizer.batch_encode_plus(a__)["""input_ids"""]
self.assertListEqual(a__ ,a__)
_lowerCAmelCase:List[Any] = tokenizer.batch_decode(a__)
self.assertListEqual(a__ ,a__)
def __UpperCamelCase ( self : Optional[int] ,a__ : str=6) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
_lowerCAmelCase:Optional[Any] = self.rust_tokenizer_class.from_pretrained(a__ ,**a__)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_lowerCAmelCase:List[Any] = """This is a simple input"""
_lowerCAmelCase:List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase:int = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase:List[Any] = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(a__ ,max_length=a__)
tokenizer_r.encode_plus(a__ ,max_length=a__)
tokenizer_r.batch_encode_plus(a__ ,max_length=a__)
tokenizer_r.encode(a__ ,max_length=a__)
tokenizer_r.batch_encode_plus(a__ ,max_length=a__)
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''')
                tokenizer_r.pad_token = None  # Hotfixing padding = None
self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding='''max_length''')
# Simple input
self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding='''max_length''')
# Simple input
self.assertRaises(
a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding='''max_length''' ,)
# Pair input
self.assertRaises(a__ ,tokenizer_r.encode ,a__ ,max_length=a__ ,padding='''max_length''')
# Pair input
self.assertRaises(a__ ,tokenizer_r.encode_plus ,a__ ,max_length=a__ ,padding='''max_length''')
# Pair input
self.assertRaises(
a__ ,tokenizer_r.batch_encode_plus ,a__ ,max_length=a__ ,padding='''max_length''' ,)
def __UpperCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.get_rust_tokenizer()
_lowerCAmelCase:List[Any] = load_dataset('''xnli''' ,'''all_languages''' ,split='''test''' ,streaming=a__)
_lowerCAmelCase:str = next(iter(a__))["""premise"""] # pick up one data
_lowerCAmelCase:Tuple = list(sample_data.values())
_lowerCAmelCase:Optional[Any] = list(map(tokenizer.encode ,a__))
_lowerCAmelCase:Optional[int] = [tokenizer.decode(a__ ,clean_up_tokenization_spaces=a__) for x in output_tokens]
self.assertListEqual(a__ ,a__)
def __UpperCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) ,1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) ,1)
| 227 |
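# A sketch of the padding behaviour the test above hotfixes; loading the
# tokenizer needs Hugging Face Hub access and the inputs are illustrative:
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
batch = tok(["short", "a slightly longer input"], padding="longest")
assert len(batch["input_ids"][0]) == len(batch["input_ids"][1])
tok.pad_token = None
# With the pad token removed, any padded encoding raises a ValueError,
# which is exactly what the assertRaises calls above check.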
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 26 | 0 |
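# The tests above exercise datasets' cached download/extract plus the
# streaming iter_archive/iter_files helpers; a minimal usage sketch
# (the archive path is hypothetical):
from datasets import DownloadManager

dl_manager = DownloadManager()
# iter_archive yields (member_path, file_object) pairs without extracting
# the whole archive to disk first.
for member_path, fobj in dl_manager.iter_archive("data/archive.tar.gz"):
    print(member_path, fobj.readline())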
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79 |
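# This is the standard transformers lazy-import pattern: importing the
# package only registers the names in _import_structure, and the heavy
# framework-specific modules load on first attribute access. A small
# sketch of the observable behaviour:
import importlib

clip_pkg = importlib.import_module("transformers.models.clip")
print(type(clip_pkg).__name__)  # _LazyModule: nothing heavy imported yet
_ = clip_pkg.CLIPTextConfig     # first access triggers the real submodule import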
'''simple docstring'''
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 0 |
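# A brute-force cross-check of the closed forms used above; this is only a
# sanity test, not part of the solution:
def solution_bruteforce(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares

assert solution_bruteforce(10) == 2640  # value quoted in the problem statement
assert solution_bruteforce(100) == 25164150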
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees the expected arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 488 |
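# Typical invocation of the launcher above (script name and flags are
# illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# xmp.spawn then starts 8 TPU processes, each importing run_glue as a module
# and calling its `_mp_fn(index)` entry point with the patched sys.argv.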
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """simple docstring"""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]
    def __str__(self) -> str:
        """simple docstring"""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"
        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        """simple docstring"""
        return str(self)
    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """simple docstring"""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self, loc: tuple[int, int]) -> Any:
        """simple docstring"""
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        """simple docstring"""
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        """simple docstring"""
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result
    def __neg__(self) -> Matrix:
        """simple docstring"""
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result
    def __sub__(self, another: Matrix) -> Matrix:
        """simple docstring"""
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        """simple docstring"""
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        """simple docstring"""
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """simple docstring"""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        """simple docstring"""
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")
    def test2() -> None:
        """simple docstring"""
        import doctest
        doctest.testmod()
    test1()
| 26 | 0 |
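# sherman_morrison above implements the rank-one update formula
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)
# applied to a Matrix that already holds A^(-1). A numpy cross-check of the
# demo values (numpy is an assumption here; the class itself does not use it):
import numpy as np

ainv = np.eye(3)  # inverse of the identity, as in the demo above
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
factor = 1.0 + (v.T @ ainv @ u)[0, 0]
sm = ainv - (ainv @ u) @ (v.T @ ainv) / factor
direct = np.linalg.inv(np.linalg.inv(ainv) + u @ v.T)
assert np.allclose(sm, direct)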
def rank_of_matrix(matrix: list) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            # (note: reassigning the loop variable has no effect on a Python for loop)
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 17 |
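# A small usage sketch for the row-reduction rank above. The example matrix
# is made up: its third row is the sum of the first two, so the rank is 2.
example = [
    [1.0, 2.0, 3.0, 4.0],
    [2.0, 0.0, 1.0, 1.0],
    [3.0, 2.0, 4.0, 5.0],
]
print(rank_of_matrix(example))  # 2 -- note the function mutates its argument in place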
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 2000, snr: float = 0.15, sigma_min: float = 0.01, sigma_max: float = 1348.0, sampling_eps: float = 1e-5, correct_steps: int = 1, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample, timestep=None):
        return sample
    def set_timesteps(self, num_inference_steps, sampling_eps=None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True, ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True, ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ):
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noisy_samples = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noisy_samples + original_samples
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
| 361 |
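# A toy driving loop for the scheduler above (it matches diffusers'
# ScoreSdeVeScheduler); the score model is faked with random tensors, so the
# output is meaningless -- a real pipeline would call a trained score network
# and repeat step_correct config.correct_steps times per timestep:
import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=5)
scheduler.set_sigmas(num_inference_steps=5)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    fake_score = torch.randn_like(sample)  # stand-in for a score network
    sample = scheduler.step_correct(fake_score, sample, return_dict=False)[0]
    sample = scheduler.step_pred(fake_score, t, sample, return_dict=False)[0]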
'''simple docstring'''
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
    def __str__(self) -> str:
        """simple docstring"""
        return str(self.k)
    def detect(self, img_path: str):
        """simple docstring"""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: shadows self.k with the default value
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 26 | 0 |
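# Per pixel, the detector above forms the windowed structure tensor
#   M = [[Wxx, Wxy], [Wxy, Wyy]]
# and scores it with the Harris response R = det(M) - k * trace(M)^2; both
# eigenvalues large (a corner) gives a large positive R. A standalone numpy
# sketch of that score on a synthetic image (sizes are illustrative):
import numpy as np

img = np.zeros((32, 32))
img[8:24, 8:24] = 255.0  # a bright square: corners at its four vertices
dy, dx = np.gradient(img)
ixx, iyy, ixy = dx * dx, dy * dy, dx * dy

def harris_response(y, x, k=0.04, offset=1):
    wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
    wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
    wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
    return (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2

print(harris_response(8, 8) > harris_response(8, 16))  # corner beats edge midpoint: True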
def solution(n: int = 4_000_000) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 |
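# Only every third Fibonacci number is even, and the even terms satisfy
# E(n) = 4 * E(n-1) + E(n-2), so the same total can be computed without
# storing the whole sequence; a compact alternative sketch:
def even_fib_sum(limit: int = 4_000_000) -> int:
    a, b = 2, 8  # first two even Fibonacci numbers
    total = 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

assert even_fib_sum(100) == 2 + 8 + 34  # = 44
assert even_fib_sum(4_000_000) == 4613732  # the well-known Project Euler 2 value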
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 26 | 0 |
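# Minimal usage sketch of the processor above; the checkpoint name is
# illustrative and the first call needs Hub access:
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']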
"""simple docstring"""
from jiwer import compute_measures
import datasets
UpperCAmelCase_ : int = """\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"""
UpperCAmelCase_ : Dict = """\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"""
UpperCAmelCase_ : List[str] = """\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 512 |
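# Working the doctest above by hand with WER = (S + D + I) / N: the first
# pair has one substitution over four reference words, the second has two
# substitutions plus one insertion over four reference words, so
# WER = (1 + 3) / (4 + 4) = 0.5. The same number via jiwer directly
# (assuming a recent jiwer exposes the top-level `wer` helper):
from jiwer import wer

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
assert wer(references, predictions) == 0.5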
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 0 |
import numpy as np
import qiskit
def __UpperCAmelCase ( __A = 8 , __A = None ) -> str:
'''simple docstring'''
UpperCAmelCase__ = np.random.default_rng(seed=_lowerCamelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
UpperCAmelCase__ = 6 * key_len
# Measurement basis for Alice's qubits.
UpperCAmelCase__ = rng.integers(2 , size=_lowerCamelCase )
# The set of states Alice will prepare.
UpperCAmelCase__ = rng.integers(2 , size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
UpperCAmelCase__ = rng.integers(2 , size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
UpperCAmelCase__ = qiskit.QuantumCircuit(_lowerCamelCase , name="BB84" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
UpperCAmelCase__ = qiskit.Aer.get_backend("aer_simulator" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
UpperCAmelCase__ = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1 , seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
UpperCAmelCase__ = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
UpperCAmelCase__ = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
UpperCAmelCase__ = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase , "0" )
return key
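# Illustrative sifting (a sketch of the basis comparison above, with made-up
# values): if alice_basis = [0, 1, 1, 0] and bob_basis = [0, 0, 1, 1], only
# positions 0 and 2 agree, so only those two measured bits can enter the key.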
if __name__ == "__main__":
print(f"The generated key is : {bbaa(8, seed=0)}")
from doctest import testmod
testmod()
| 475 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 0 |
from __future__ import annotations
A : Optional[Any] = list[list[int]]
# assigning initial values to the grid
A : str = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
A : Union[str, Any] = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
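# Worked example of the box arithmetic above (illustrative values): for
# row = 7 and column = 4, (row - row % 3) = 6 and (column - column % 3) = 3,
# so the inner loops scan the 3x3 box whose top-left corner is (6, 3).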
def _lowerCAmelCase ( _lowerCAmelCase ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _lowerCAmelCase ( _lowerCAmelCase ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(_lowerCamelCase ):
__snake_case = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
__snake_case = digit
if sudoku(_lowerCamelCase ) is not None:
return grid
__snake_case = 0
return None
def _lowerCAmelCase ( _lowerCAmelCase ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(_lowerCamelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
A : Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 371 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
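# Illustrative renames (hypothetical state-dict keys, shown only to document
# the mapping above):
#   "encoder.layers.0.attention.q_lin.weight" -> "encoder.layers.0.self_attn.q_proj.weight"
#   "decoder.layers.0.norm3.bias" -> "decoder.layers.0.final_layer_norm.bias"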
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
snake_case__ : str = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_lowerCamelCase)])
snake_case__ : Optional[Any] = np.array(_lowerCamelCase)
snake_case__ : List[str] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _lowerCamelCase)) , x.transpose()) , _lowerCamelCase)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
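# The line above solves the ordinary least-squares normal equation,
# beta = inv(X^T X) X^T y, for a design matrix whose rows are [1, date, match].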
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Optional[int] = (1, 2, 1)
snake_case__ : Optional[int] = (1, 1, 0, 7)
snake_case__ : List[Any] = SARIMAX(
_lowerCamelCase , exog=_lowerCamelCase , order=_lowerCamelCase , seasonal_order=_lowerCamelCase)
snake_case__ : List[Any] = model.fit(disp=_lowerCamelCase , maxiter=600 , method="""nm""")
snake_case__ : List[Any] = model_fit.predict(1 , len(_lowerCamelCase) , exog=[test_match])
return result[0]
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Optional[Any] = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1)
regressor.fit(_lowerCamelCase , _lowerCamelCase)
snake_case__ : Optional[Any] = regressor.predict(_lowerCamelCase)
return y_pred[0]
def _lowercase ( UpperCAmelCase_):
"""simple docstring"""
train_user.sort()
snake_case__ : List[str] = np.percentile(_lowerCamelCase , 25)
snake_case__ : Optional[int] = np.percentile(_lowerCamelCase , 75)
snake_case__ : List[Any] = qa - qa
snake_case__ : List[str] = qa - (iqr * 0.1)
return low_lim
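# Illustrative numbers (not from real data): for train_user = [10, 20, 30, 40],
# q1 = 17.5 and q3 = 32.5, so iqr = 15.0 and low_lim = 17.5 - 1.5 = 16.0.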
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_):
"""simple docstring"""
snake_case__ : Tuple = 0
snake_case__ : str = 0
for i in list_vote:
if i > actual_result:
snake_case__ : str = not_safe + 1
else:
if abs(abs(_lowerCamelCase) - abs(_lowerCamelCase)) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowercase_: Tuple = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
lowercase_: Tuple = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
lowercase_: Optional[Any] = Normalizer().fit_transform(data_input_df.values)
# split data
lowercase_: str = normalize_df[:, 2].tolist()
lowercase_: Tuple = normalize_df[:, 0].tolist()
lowercase_: int = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowercase_: List[str] = normalize_df[:, [1, 2]].tolist()
lowercase_: Any = x[: len(x) - 1]
lowercase_: Optional[int] = x[len(x) - 1 :]
# for linear regression & sarimax
lowercase_: Dict = total_date[: len(total_date) - 1]
lowercase_: List[str] = total_user[: len(total_user) - 1]
lowercase_: int = total_match[: len(total_match) - 1]
lowercase_: Union[str, Any] = total_date[len(total_date) - 1 :]
lowercase_: Any = total_user[len(total_user) - 1 :]
lowercase_: List[str] = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowercase_: Union[str, Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowercase_: int = '' if data_safety_checker(res_vote, tst_user) else 'not '
    print(f'Today\'s data is {not_str}safe.')
| 648 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
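# Example call (version number illustrative):
#   update_version_in_file("setup.py", "4.30.0", pattern="setup")
# rewrites the version="..." line of setup.py in place using the pattern above.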
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class snake_case_ :
def __init__( self , a_ ):
a_ : List[Any] = value
a_ : Node | None = None
a_ : Node | None = None
class snake_case_ :
def __init__( self , a_ ):
a_ : Tuple = tree
def snake_case_ ( self , a_ ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
yield self.depth_first_search(self.tree )
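# Illustrative use (hypothetical tree): for a root node holding 1 whose left
# and right children hold 2 and 3, depth_first_search returns 1 + (2 + 3) = 6,
# and iterating the BinaryTree yields that single total.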
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 237 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self._create_example_records()
__snake_case : str = Dataset.from_list(__magic_name__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__magic_name__ ):
self.assertDictEqual(__magic_name__ , example_records[i] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self._create_example_records()
__snake_case : Dict = Dataset.from_list(__magic_name__ )
__snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : str ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
__snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowercase__ ( self : List[str] ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
__snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__snake_case : int = Dataset.from_list(__magic_name__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = Dataset.from_list([] )
self.assertEqual(len(__magic_name__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 26 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class a__ ( __lowercase ):
snake_case__ = '''decision_transformer'''
snake_case__ = ['''past_key_values''']
snake_case__ = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Tuple ,a__ : Optional[Any]=17 ,a__ : Any=4 ,a__ : Union[str, Any]=128 ,a__ : Optional[Any]=4096 ,a__ : List[Any]=True ,a__ : List[Any]=1 ,a__ : Optional[Any]=1024 ,a__ : Union[str, Any]=3 ,a__ : Tuple=1 ,a__ : Dict=None ,a__ : Tuple="relu" ,a__ : List[str]=0.1 ,a__ : Optional[int]=0.1 ,a__ : Any=0.1 ,a__ : str=1E-5 ,a__ : Dict=0.02 ,a__ : List[str]=True ,a__ : List[str]=True ,a__ : Optional[int]=5_0256 ,a__ : Optional[Any]=5_0256 ,a__ : int=False ,a__ : Union[str, Any]=False ,**a__ : int ,) -> int:
"""simple docstring"""
_lowerCAmelCase:str = state_dim
_lowerCAmelCase:Optional[Any] = act_dim
_lowerCAmelCase:Dict = hidden_size
_lowerCAmelCase:int = max_ep_len
_lowerCAmelCase:Any = action_tanh
_lowerCAmelCase:Union[str, Any] = vocab_size
_lowerCAmelCase:Optional[Any] = n_positions
_lowerCAmelCase:Optional[int] = n_layer
_lowerCAmelCase:List[Any] = n_head
_lowerCAmelCase:Tuple = n_inner
_lowerCAmelCase:int = activation_function
_lowerCAmelCase:List[Any] = resid_pdrop
_lowerCAmelCase:Union[str, Any] = embd_pdrop
_lowerCAmelCase:int = attn_pdrop
_lowerCAmelCase:Optional[Any] = layer_norm_epsilon
_lowerCAmelCase:Optional[Any] = initializer_range
_lowerCAmelCase:str = scale_attn_weights
_lowerCAmelCase:str = use_cache
_lowerCAmelCase:List[Any] = scale_attn_by_inverse_layer_idx
_lowerCAmelCase:Dict = reorder_and_upcast_attn
_lowerCAmelCase:Any = bos_token_id
_lowerCAmelCase:Dict = eos_token_id
super().__init__(bos_token_id=a__ ,eos_token_id=a__ ,**a__)
| 227 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNormad(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
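# Together these two hooks illustrate the contract exercised in the tests
# below: a pre-forward hook may rewrite (args, kwargs) before the wrapped
# forward runs, and a post-forward hook may rewrite its output afterwards, so
# a model with PreForwardHook attached behaves like the bare model on x + 1.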
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 | 0 |
import os
import sys
SCREAMING_SNAKE_CASE__ : str = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
SCREAMING_SNAKE_CASE__ : Any = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> Tuple:
'''simple docstring'''
return AutoConfig.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return AutoModel.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> Dict:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> Dict:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def _lowerCamelCase ( *__lowerCamelCase , **__lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*_lowerCamelCase , **_lowerCamelCase )
| 79 |
'''simple docstring'''
from __future__ import annotations
__UpperCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
__snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the reference grid
__snake_case : Tuple = 1
__snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the action grid
__snake_case : List[str] = init[0]
__snake_case : str = init[1]
__snake_case : int = 0
__snake_case : int = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : List[str] = [[f, g, x, y]]
__snake_case : Any = False # flag that is set when search is complete
__snake_case : int = False # flag set if we can't find expand
while not found and not resign:
if len(_lowerCamelCase ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : Tuple = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : List[Any] = next_cell[3]
__snake_case : int = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Optional[Any] = True
else:
for i in range(len(_lowerCamelCase ) ): # to try out different valid actions
__snake_case : Union[str, Any] = x + DIRECTIONS[i][0]
__snake_case : str = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : str = g + cost
__snake_case : Tuple = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : List[str] = 1
__snake_case : Optional[int] = i
__snake_case : List[str] = []
__snake_case : Optional[int] = goal[0]
__snake_case : List[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Dict = x - DIRECTIONS[action[x][y]][0]
__snake_case : int = y - DIRECTIONS[action[x][y]][1]
__snake_case : Optional[int] = xa
__snake_case : int = ya
invpath.append([x, y] )
__snake_case : Optional[int] = []
for i in range(len(_lowerCamelCase ) ):
path.append(invpath[len(_lowerCamelCase ) - 1 - i] )
return path, action
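# Illustrative cost bookkeeping (using the demo grid below): expanding the
# start cell (0, 0) with g = 0 and heuristic[0][0] = 9 pushes [9, 0, 0, 0]
# onto the open list; cells are then popped in order of lowest f = g + h.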
if __name__ == "__main__":
__UpperCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCamelCase = [0, 0]
# all coordinates are given in format [y,x]
__UpperCamelCase = [len(grid) - 1, len(grid[0]) - 1]
__UpperCamelCase = 1
# the cost map which pushes the path closer to the goal
__UpperCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCamelCase = 99
__UpperCamelCase , __UpperCamelCase = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
snake_case_ : Optional[Any] = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 488 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""only integers accepted as input""" )
else:
__snake_case : List[Any] = str(abs(_lowerCamelCase ) )
__snake_case : Union[str, Any] = [list(_lowerCamelCase ) for char in range(len(_lowerCamelCase ) )]
for index in range(len(_lowerCamelCase ) ):
num_transpositions[index].pop(_lowerCamelCase )
return max(
int("""""".join(list(_lowerCamelCase ) ) ) for transposition in num_transpositions )
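# Worked example (illustrative): for input 142 the candidates are 42 (drop
# the 1), 12 (drop the 4) and 14 (drop the 2), so the function returns 42.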
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 0 |
from pathlib import Path
import fire
from tqdm import tqdm
def __SCREAMING_SNAKE_CASE ( a__ : List[Any]="ro" ,a__ : List[str]="en" ,a__ : Dict="wmt16" ,a__ : List[Any]=None ) -> None:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__A : Dict = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__A : Tuple = datasets.load_dataset(_lowerCamelCase ,_lowerCamelCase )
if save_dir is None:
__A : Optional[Any] = f"""{dataset}-{pair}"""
__A : List[Any] = Path(_lowerCamelCase )
save_dir.mkdir(exist_ok=_lowerCamelCase )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__A : List[str] = """val""" if split == """validation""" else split
__A : List[Any] = save_dir.joinpath(f"""{fn}.source""" )
__A : Union[str, Any] = save_dir.joinpath(f"""{fn}.target""" )
__A : Union[str, Any] = src_path.open("""w+""" )
__A : Dict = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__A : Optional[Any] = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f"""Saved {dataset} dataset to {save_dir}""" )
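# Example invocation through the fire CLI below (script name hypothetical):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
# which writes train/val/test .source and .target files under wmt16-ro-en/.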
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 17 |
'''simple docstring'''
from __future__ import annotations
import math
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
)
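# Worked example (illustrative): with scores [3, 5, 2, 9] and height 2, the
# minimising layer reports min(3, 5) = 3 and min(2, 9) = 2, so the maximiser
# at the root returns max(3, 2) = 3.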
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__snake_case : Optional[int] = math.log(len(_lowerCamelCase ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __magic_name__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : str , __snake_case : Dict=1024 ) -> List[str]:
lowercase : int = [], []
lowercase : List[str] = list(zip(_lowerCamelCase , _lowerCamelCase ) )
lowercase : Union[str, Any] = sorted_examples[0]
def is_too_big(__snake_case : Dict ):
return tok(_lowerCamelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
lowercase : Union[str, Any] = new_src + """ """ + src
lowercase : List[Any] = new_tgt + """ """ + tgt
if is_too_big(_lowerCamelCase ) or is_too_big(_lowerCamelCase ): # cant fit, finalize example
finished_src.append(_lowerCamelCase )
finished_tgt.append(_lowerCamelCase )
lowercase : Optional[int] = src, tgt
else: # can fit, keep adding
lowercase : Dict = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_lowerCamelCase )
finished_tgt.append(_lowerCamelCase )
return finished_src, finished_tgt
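# Illustrative behaviour (hypothetical sentences): each next (src, tgt) pair
# is appended to the running example; once a concatenation would exceed
# max_tokens on either side, the running pair is flushed to the finished
# lists and a new one is started from the current pair.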
def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[Any] ) -> Dict:
lowercase : Optional[Any] = Path(_lowerCamelCase )
save_path.mkdir(exist_ok=_lowerCamelCase )
for split in ["train"]:
lowercase : int = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
lowercase : Union[str, Any] = [x.rstrip() for x in Path(_lowerCamelCase ).open().readlines()]
lowercase : str = [x.rstrip() for x in Path(_lowerCamelCase ).open().readlines()]
lowercase : Optional[Any] = pack_examples(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
print(f"""packed {split} split from {len(_lowerCamelCase )} examples -> {len(_lowerCamelCase )}.""" )
Path(save_path / f"""{split}.source""" ).open("w" ).write("\n".join(_lowerCamelCase ) )
Path(save_path / f"""{split}.target""" ).open("w" ).write("\n".join(_lowerCamelCase ) )
for split in ["val", "test"]:
lowercase : Any = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(_lowerCamelCase , save_path / f"""{split}.source""" )
shutil.copyfile(_lowerCamelCase , save_path / f"""{split}.target""" )
def __magic_name__ ( ) -> int:
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=_lowerCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=_lowerCamelCase , default=128 )
parser.add_argument("--data_dir" , type=_lowerCamelCase )
parser.add_argument("--save_path" , type=_lowerCamelCase )
lowercase : int = parser.parse_args()
lowercase : List[str] = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 361 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> None:
"""simple docstring"""
if start is None:
__snake_case : Optional[Any] = 0
if end is None:
__snake_case : Optional[Any] = len(_lowerCamelCase ) - 1
if start >= end:
return
__snake_case : Tuple = (start + end) // 2
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
__snake_case , __snake_case : str = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
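# Example usage (illustrative): for seq = [5, 2, 4, 1], calling slowsort(seq)
# sorts the list in place, leaving seq == [1, 2, 4, 5]; start and end default
# to the full range.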
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
lowerCamelCase : Optional[Any] = datasets.logging.get_logger(__name__)
lowerCamelCase : int = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowerCamelCase : Optional[Any] = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowerCamelCase : int = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'sources': datasets.Value('string' , id='sequence' ),
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] , )
def a__ ( self : int , A_ : Any ) -> Any:
"""simple docstring"""
if self.config_name == "default":
lowerCamelCase_ = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
else:
lowerCamelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : List[str] , A_ : Any , A_ : int=None , A_ : str=False ) -> List[str]:
"""simple docstring"""
if gpus is None:
lowerCamelCase_ = 1 if torch.cuda.is_available() else 0
lowerCamelCase_ = {"""src""": sources, """mt""": predictions, """ref""": references}
lowerCamelCase_ = [dict(zip(A_ , A_ ) ) for t in zip(*data.values() )]
lowerCamelCase_ = self.scorer.predict(A_ , gpus=A_ , progress_bar=A_ )
return {"mean_score": mean_score, "scores": scores}
| 70 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """Run the doctests of every file in `directory` that matches the filters."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_files(self) -> None:
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)
    def test_tokenization_files(self) -> None:
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)
    def test_configuration_files(self) -> None:
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)
    def test_remaining_files(self) -> None:
        # check every file that is neither a configuration, modeling nor tokenization file
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)
    def test_doc_files(self) -> None:
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
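
# A standalone sketch of the same pattern used in `analyze_directory`: build a
# doctest suite for a single importable `transformers` submodule and run it.
# The default module name below is only an example, and `DocTestSuite` may
# raise ValueError when the module contains no doctests.
def run_module_doctests(module_name: str = "tokenization_utils") -> None:
    module = getattr(transformers, module_name)  # AttributeError if it is not an attribute of `transformers`
    suite = doctest.DocTestSuite(module)
    result = unittest.TextTestRunner().run(suite)
    print(f"{module_name}: {len(result.failures)} doctest failure(s)")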
| 26 | 0 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"""
_KWARGS_DESCRIPTION = """\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"""
_CITATION = """\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n}\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    """Recall metric, backed by scikit-learn's `recall_score`."""

    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''')),
'''references''': datasets.Sequence(datasets.Value('''int32''')),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        # sklearn expects the ground truth first: recall_score(y_true, y_pred, ...)
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 512 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
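
# Overall flow of this community-style pipeline: Whisper transcribes the input
# audio into a text prompt, CLIP encodes that prompt, the UNet iteratively
# denoises random latents under classifier-free guidance, and the VAE decodes
# the final latents into an image.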
class SpeechToImagePipeline(DiffusionPipeline):
    """Generate images from spoken audio by chaining Whisper with Stable Diffusion."""

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ) -> None:
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto") -> None:
        """Compute attention in `slice_size` steps to reduce peak memory at some speed cost."""
        if slice_size == "auto":
            # halve the number of attention heads per slice as a reasonable default
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self) -> None:
        """Restore full (unsliced) attention computation."""
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate: int = 16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Transcribe `audio` with Whisper and run Stable Diffusion on the transcription."""
        # transcribe the audio; the decoded text becomes the diffusion prompt
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(
            predicted_ids, skip_special_tokens=True, normalize=True
        )[0]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        # scale and decode the image latents with the VAE (0.18215 is SD's latent scaling factor)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
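
# A hypothetical end-to-end usage sketch for the pipeline above. The component
# checkpoints ("openai/whisper-small", "runwayml/stable-diffusion-v1-5") and the
# `librosa` loading step are illustrative assumptions, not requirements of the class.
if __name__ == "__main__":
    import librosa
    from diffusers import StableDiffusionPipeline

    audio, sampling_rate = librosa.load("speech.wav", sr=16_000)

    # borrow the diffusion components from a standard Stable Diffusion checkpoint
    sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe = SpeechToImagePipeline(
        speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
        speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
        vae=sd.vae,
        text_encoder=sd.text_encoder,
        tokenizer=sd.tokenizer,
        unet=sd.unet,
        scheduler=sd.scheduler,
        safety_checker=sd.safety_checker,
        feature_extractor=sd.feature_extractor,
    )
    image = pipe(audio, sampling_rate=sampling_rate).images[0]
    image.save("speech_to_image.png")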
| 26 | 0 |