from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of ``number``: -1 if ``number`` has an odd
    number of prime factors (counted with multiplicity), 1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
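# Quick check (illustrative; assumes maths.prime_factors returns the prime
# factors with multiplicity, e.g. prime_factors(12) == [2, 2, 3]):
if __name__ == "__main__":
    assert liouville_lambda(4) == 1  # 4 = 2 * 2 -> even number of factors
    assert liouville_lambda(12) == -1  # 12 = 2 * 2 * 3 -> odd number of factors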
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
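# Sketch of the resulting special-token layout (token ids are illustrative;
# the real values come from the loaded vocab):
#
#     tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#     tok.build_inputs_with_special_tokens([7, 8])           # [<cls>, 7, 8, <sep>]
#     tok.create_token_type_ids_from_sequences([7, 8], [9])  # [0, 0, 0, 0, 1, 1]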
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an RGB image to grayscale using the ITU-R 601-2 luma transform."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image to a boolean (binary) image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
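# Self-contained sanity check on a toy image (no image file needed): a single
# "on" pixel dilated with a cross-shaped structuring element grows into a cross.
if __name__ == "__main__":
    toy = np.zeros((3, 3), dtype=int)
    toy[1, 1] = 1
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    assert (dilation(toy, cross) == cross).all()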
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
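# To launch the demo (assumes the precomputed .dat index files referenced above
# sit next to this script, a local Elasticsearch on port 9200, and a CUDA GPU):
#
#     streamlit run eli5_app.py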
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector: sum(x * x for x in vector)."""
    return np.dot(vector, vector)


class SVC:
    """Support vector classifier trained by solving the Wolfe dual problem."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel were used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n] * self.classes[n] * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
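# Minimal usage sketch (illustrative): a 1-D, linearly separable toy problem.
# With observations [0.0] (class -1) and [1.0] (class +1) and a linear kernel,
# the dual solution gives the decision function s(x) = 2x - 1, so points at or
# above 0.5 are labeled +1.
if __name__ == "__main__":
    toy_xs = [np.asarray([0.0]), np.asarray([1.0])]
    toy_ys = np.asarray([-1, 1])
    toy_svc = SVC(kernel="linear")
    toy_svc.fit(toy_xs, toy_ys)
    assert toy_svc.predict(np.asarray([2.0])) == 1
    assert toy_svc.predict(np.asarray([-1.0])) == -1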
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
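# The fast tests above run on CPU; the integration tests require a CUDA GPU and
# network access. A typical invocation from the diffusers repo root (the path
# is illustrative) would be:
#
#     pytest tests/pipelines/stable_diffusion/test_cycle_diffusion.py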
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class __UpperCamelCase ( _lowerCAmelCase ):
_UpperCAmelCase = "roberta-prelayernorm"
def __init__( self ,_A=5_0265 ,_A=768 ,_A=12 ,_A=12 ,_A=3072 ,_A="gelu" ,_A=0.1 ,_A=0.1 ,_A=512 ,_A=2 ,_A=0.0_2 ,_A=1E-12 ,_A=1 ,_A=0 ,_A=2 ,_A="absolute" ,_A=True ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(pad_token_id=_lowercase ,bos_token_id=_lowercase ,eos_token_id=_lowercase ,**_lowercase )
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : Optional[int] = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : List[Any] = position_embedding_type
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : str = classifier_dropout
class __UpperCamelCase ( _lowerCAmelCase ):
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase : str = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
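# Usage sketch (the defaults mirror the checkpoint above; any field can be
# overridden at construction time):
#
#     config = RobertaPreLayerNormConfig(num_hidden_layers=6)
#     assert config.model_type == "roberta-prelayernorm"
#     assert config.hidden_size == 768 and config.num_hidden_layers == 6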
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = VOCAB_FILES_NAMES
a__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[Any]=False , _lowercase : Tuple=True , _lowercase : str=True , _lowercase : str="[CLS]" , _lowercase : Dict="[SEP]" , _lowercase : Union[str, Any]="[UNK]" , _lowercase : Any="[SEP]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : Tuple="[CLS]" , _lowercase : Optional[Any]="[MASK]" , **_lowercase : str , ):
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , **_lowercase , )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = remove_space
__UpperCAmelCase = keep_accents
__UpperCAmelCase = vocab_file
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(_lowercase )
@property
def a ( self : int ):
return len(self.sp_model )
def a ( self : Tuple ):
__UpperCAmelCase = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
__UpperCAmelCase = self.__dict__.copy()
__UpperCAmelCase = None
return state
def __setstate__( self : Tuple , _lowercase : str ):
__UpperCAmelCase = d
__UpperCAmelCase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def a ( self : Tuple , _lowercase : Optional[int] , _lowercase : List[Any]=False ):
__UpperCAmelCase = self.sp_model.EncodeAsPieces(_lowercase )
return pieces
def a ( self : int , _lowercase : List[str] ):
return self.sp_model.PieceToId(_lowercase )
def a ( self : List[str] , _lowercase : str ):
return self.sp_model.IdToPiece(_lowercase )
def a ( self : Any , _lowercase : Dict ):
__UpperCAmelCase = self.sp_model.decode_pieces(_lowercase )
return out_string
def a ( self : str , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a ( self : Optional[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def a ( self : Tuple , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
if not os.path.isdir(_lowercase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_lowercase ) )
return
__UpperCAmelCase = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowercase ):
copyfile(self.vocab_file , _lowercase )
return (out_vocab_file,)
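# Usage sketch (the model path is hypothetical; any trained SentencePiece model
# file works):
#
#     tok = RemBertTokenizer(vocab_file="sentencepiece.model")
#     pieces = tok._tokenize("Hello world")
#     ids = [tok._convert_token_to_id(p) for p in pieces]
#     tok.build_inputs_with_special_tokens(ids)  # [CLS] ... [SEP]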
"""Logging utilities."""
import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return it as the default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library's root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Disable propagation of library log outputs to the root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of library log outputs to the root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every library handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting of every library handler."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Behaves like `logger.warning()`, but is a no-op if the TRANSFORMERS_NO_ADVISORY_WARNINGS env var is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """
    Behaves like `logger.warning()`, but an identical warning is only emitted once.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
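# Typical client usage of the helpers above (mirrors the public API defined in
# this module):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger("transformers")
#     logger.info("INFO")
#     logger.warning_advice("ADVICE")  # silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set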
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
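# With the lazy module installed in sys.modules, importing light symbols stays
# cheap; heavyweight backends only load on first attribute access (illustrative):
#
#     from transformers.models.vivit import VivitConfig  # no torch import yet
#     from transformers.models.vivit import VivitModel   # pulls in the torch-backed module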
"""Conditional DETR model configuration"""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
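# Usage sketch: the defaults reproduce the microsoft/conditional-detr-resnet-50
# layout, and to_dict() round-trips the nested backbone config.
#
#     config = ConditionalDetrConfig()
#     assert config.hidden_size == 256 and config.num_attention_heads == 8
#     serialized = config.to_dict()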
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
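# A minimal invocation sketch for the training script above (the script name, paths and
# model names below are hypothetical, not taken from this file; the flags are the ones
# parsed above or referenced via `args.` in main):
#
#   python train.py --force --dump_path serialization_dir/my_run \
#       --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle --mlm \
#       --student_type distilbert --student_config training_configs/distilbert.json \
#       --teacher_type bert --teacher_name bert-base-uncased
#
# With --mlm_smoothing 0.7, masking weights are computed as counts ** -0.7: counts of
# [100, 10, 1] give weights of roughly [0.04, 0.20, 1.0], so rarer tokens are masked
# proportionally more often, while special tokens get weight 0 and are never masked.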
| 49 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__A : str = logging.get_logger(__name__)
__A : Optional[int] = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = "resnet"
lowerCamelCase__ = ["basic", "bottleneck"]
def __init__( self : str , __lowerCamelCase : Dict=3 , __lowerCamelCase : Optional[int]=64 , __lowerCamelCase : Dict=[256, 512, 1024, 2048] , __lowerCamelCase : Optional[int]=[3, 4, 6, 3] , __lowerCamelCase : List[Any]="bottleneck" , __lowerCamelCase : List[str]="relu" , __lowerCamelCase : int=False , __lowerCamelCase : Dict=None , __lowerCamelCase : str=None , **__lowerCamelCase : Any , ):
super().__init__(**_lowercase )
if layer_type not in self.layer_types:
raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = embedding_size
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = layer_type
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = downsample_in_first_stage
SCREAMING_SNAKE_CASE = ["stem"] + [f"stage{idx}" for idx in range(1 , len(_lowercase ) + 1 )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names )
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = version.parse("1.11" )
@property
def _snake_case ( self : Optional[Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _snake_case ( self : int ):
return 1e-3
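# Illustrative usage (in the un-obfuscated transformers API the first class above
# corresponds to ResNetConfig; the values here are examples, not required defaults):
#
#     from transformers import ResNetConfig
#     config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck", out_features=["stage4"])
#     config.stage_names   # ["stem", "stage1", "stage2", "stage3", "stage4"]
#     config.out_features  # ["stage4"], aligned with out_indices by the helper above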
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
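# Usage note (illustrative): with the _LazyModule pattern above (where, in the
# un-obfuscated file, `_import_structure` maps submodule names to exported symbols),
# importing the package itself is cheap; the torch-backed module is only imported on
# first attribute access:
#
#     from transformers.models.fnet import FNetConfig   # no torch import yet
#     from transformers.models.fnet import FNetModel    # imports modeling_fnet lazily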
| 49 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ):
if attention_mask is None:
__SCREAMING_SNAKE_CASE : Optional[int] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE : str = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__SCREAMING_SNAKE_CASE : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
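# A small illustrative helper (not part of the original tests): what the function above
# derives when only config/input_ids/decoder_input_ids are supplied. The config stub and
# token values are assumptions chosen for the demonstration, and the function is
# referenced by its original name, exactly as the call sites below do.
def _demo_prepare_blenderbot_inputs():
    class _Cfg:
        pad_token_id = 1
        encoder_layers = decoder_layers = 2
        encoder_attention_heads = decoder_attention_heads = 2

    input_ids = np.array([[5, 7, 9, 2], [6, 8, 2, 1]])  # second row ends in padding
    decoder_input_ids = np.array([[2, 5, 7], [2, 6, 1]])
    inputs = prepare_blenderbot_inputs_dict(_Cfg(), input_ids, decoder_input_ids)
    # attention_mask is 0 exactly where input_ids equals pad_token_id:
    assert inputs["attention_mask"].tolist() == [[1, 1, 1, 1], [1, 1, 1, 0]]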
class _lowercase :
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str=13 , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :str=99 , lowerCAmelCase__ :List[str]=16 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[Any]=4 , lowerCAmelCase__ :List[Any]="gelu" , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :List[Any]=0.1 , lowerCAmelCase__ :List[Any]=32 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :Any=0 , lowerCAmelCase__ :Tuple=0.02 , ) -> List[str]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = parent
__SCREAMING_SNAKE_CASE : List[Any] = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : Dict = is_training
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : List[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
__SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[str] = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[Any] = eos_token_id
__SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
__SCREAMING_SNAKE_CASE : Tuple = bos_token_id
__SCREAMING_SNAKE_CASE : str = initializer_range
def __magic_name__( self :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__SCREAMING_SNAKE_CASE : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right(_lowercase , 1 , 2 )
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowercase , )
__SCREAMING_SNAKE_CASE : Optional[Any] = prepare_blenderbot_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def __magic_name__( self :Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Any = 20
__SCREAMING_SNAKE_CASE : Tuple = model_class_name(_lowercase )
__SCREAMING_SNAKE_CASE : List[Any] = model.encode(inputs_dict['''input_ids'''] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__SCREAMING_SNAKE_CASE : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
__SCREAMING_SNAKE_CASE : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__SCREAMING_SNAKE_CASE : List[str] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__SCREAMING_SNAKE_CASE : List[Any] = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__SCREAMING_SNAKE_CASE : str = model.decode(
decoder_input_ids[:, -1:] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowercase , )
__SCREAMING_SNAKE_CASE : Optional[int] = model.decode(_lowercase , _lowercase )
__SCREAMING_SNAKE_CASE : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
def __magic_name__( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Tuple = 20
__SCREAMING_SNAKE_CASE : Optional[int] = model_class_name(_lowercase )
__SCREAMING_SNAKE_CASE : int = model.encode(inputs_dict['''input_ids'''] )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__SCREAMING_SNAKE_CASE : List[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__SCREAMING_SNAKE_CASE : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__SCREAMING_SNAKE_CASE : Tuple = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
__SCREAMING_SNAKE_CASE : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__SCREAMING_SNAKE_CASE : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowercase , decoder_position_ids=_lowercase , )
__SCREAMING_SNAKE_CASE : Any = model.decode(_lowercase , _lowercase , decoder_attention_mask=_lowercase )
__SCREAMING_SNAKE_CASE : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = 99
def __magic_name__( self :Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__SCREAMING_SNAKE_CASE : Dict = input_ids.shape[0]
__SCREAMING_SNAKE_CASE : List[str] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __magic_name__( self :str ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = self._get_config_and_data()
__SCREAMING_SNAKE_CASE : List[Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = lm_model(input_ids=_lowercase )
__SCREAMING_SNAKE_CASE : int = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _lowercase )
def __magic_name__( self :Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__SCREAMING_SNAKE_CASE : str = FlaxBlenderbotSmallForConditionalGeneration(_lowercase )
__SCREAMING_SNAKE_CASE : Tuple = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__SCREAMING_SNAKE_CASE : int = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__SCREAMING_SNAKE_CASE : str = lm_model(input_ids=_lowercase , decoder_input_ids=_lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _lowercase )
def __magic_name__( self :str ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__SCREAMING_SNAKE_CASE : Optional[Any] = shift_tokens_right(_lowercase , 1 , 2 )
__SCREAMING_SNAKE_CASE : Optional[int] = np.equal(_lowercase , 1 ).astype(np.floataa ).sum()
__SCREAMING_SNAKE_CASE : List[Any] = np.equal(_lowercase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowercase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class _lowercase ( _lowerCAmelCase , unittest.TestCase , _lowerCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Dict = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __magic_name__( self :Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = FlaxBlenderbotSmallModelTester(self )
def __magic_name__( self :Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase )
def __magic_name__( self :int ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowercase , _lowercase , _lowercase )
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(_lowercase , _lowercase )
__SCREAMING_SNAKE_CASE : Tuple = model_class(_lowercase )
@jax.jit
def encode_jitted(lowerCAmelCase__ :int , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :List[Any] ):
return model.encode(input_ids=_lowercase , attention_mask=_lowercase )
with self.subTest('''JIT Enabled''' ):
__SCREAMING_SNAKE_CASE : str = encode_jitted(**_lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE : List[Any] = encode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __magic_name__( self :Optional[int] ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE : Dict = model_class(_lowercase )
__SCREAMING_SNAKE_CASE : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__SCREAMING_SNAKE_CASE : Any = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ):
return model.decode(
decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , )
with self.subTest('''JIT Enabled''' ):
__SCREAMING_SNAKE_CASE : Tuple = decode_jitted(**_lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE : int = decode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __magic_name__( self :Dict ) -> List[str]:
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE : int = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
            # the model expects an eos token in input_ids
__SCREAMING_SNAKE_CASE : str = np.ones((1, 1) ) * model.config.eos_token_id
__SCREAMING_SNAKE_CASE : Optional[int] = model(_lowercase )
self.assertIsNotNone(_lowercase )
| 696 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
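# Example invocation (script and file names are hypothetical):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-converted
#
# Note that the conversion function above currently writes the state dict to the
# hard-coded './bertabs-finetuned-cnndm-extractive-abstractive-summarization/' path
# rather than to --pytorch_dump_folder_path.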
| 49 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def lowerCAmelCase__ ( __magic_name__ ) ->Optional[Any]:
__lowercase = SwinConfig(
embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=["stage2", "stage3", "stage4"] , )
__lowercase = DetaConfig(
backbone_config=snake_case_ , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=snake_case_ , with_box_refine=snake_case_ , two_stage=snake_case_ , )
# set labels
__lowercase = "huggingface/label-files"
if "o365" in model_name:
__lowercase = 3_6_6
__lowercase = "object365-id2label.json"
else:
__lowercase = 9_1
__lowercase = "coco-detection-id2label.json"
__lowercase = num_labels
__lowercase = json.load(open(cached_download(hf_hub_url(snake_case_ , snake_case_ , repo_type="dataset" ) ) , "r" ) )
__lowercase = {int(snake_case_ ): v for k, v in idalabel.items()}
__lowercase = idalabel
__lowercase = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( __magic_name__ ) ->Tuple:
__lowercase = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->Dict:
__lowercase = dct.pop(snake_case_ )
__lowercase = val
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->Optional[Any]:
__lowercase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowercase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowercase = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
__lowercase = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:dim, :]
__lowercase = in_proj_bias[: dim]
__lowercase = in_proj_weight[
dim : dim * 2, :
]
__lowercase = in_proj_bias[
dim : dim * 2
]
__lowercase = in_proj_weight[
-dim :, :
]
__lowercase = in_proj_bias[-dim :]
# fmt: on
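# Illustrative sketch (not from the original script) of the q/k/v split the loop above
# performs: a fused input projection of shape (3*dim, dim) is cut row-wise into query,
# key and value weights, in that order. Shapes here are toy values.
def _demo_split_fused_qkv(dim=4):
    in_proj_weight = torch.randn(3 * dim, dim)
    q = in_proj_weight[:dim, :]
    k = in_proj_weight[dim : dim * 2, :]
    v = in_proj_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    # stacking them back in (q, k, v) order reconstructs the fused matrix exactly
    assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)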
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ ) ->Union[str, Any]:
# transformer decoder self-attention layers
__lowercase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__lowercase = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
__lowercase = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowercase = in_proj_weight[:hidden_size, :]
__lowercase = in_proj_bias[:hidden_size]
__lowercase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__lowercase = in_proj_bias[hidden_size : hidden_size * 2]
__lowercase = in_proj_weight[-hidden_size:, :]
__lowercase = in_proj_bias[-hidden_size:]
def lowerCAmelCase__ ( ) ->Optional[int]:
__lowercase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ) ->List[str]:
__lowercase = get_deta_config(snake_case_ )
# load original state dict
if model_name == "deta-swin-large":
__lowercase = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
__lowercase = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
__lowercase = torch.load(snake_case_ , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(snake_case_ , param.shape )
# rename keys
__lowercase = create_rename_keys(snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
read_in_swin_q_k_v(snake_case_ , config.backbone_config )
read_in_decoder_q_k_v(snake_case_ , snake_case_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__lowercase = state_dict.pop(snake_case_ )
__lowercase = val
if "input_proj" in key:
__lowercase = state_dict.pop(snake_case_ )
__lowercase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__lowercase = state_dict.pop(snake_case_ )
__lowercase = val
# finally, create HuggingFace model and load state dict
__lowercase = DetaForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
__lowercase = "cuda" if torch.cuda.is_available() else "cpu"
model.to(snake_case_ )
# load image processor
__lowercase = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
__lowercase = prepare_img()
__lowercase = processor(images=snake_case_ , return_tensors="pt" )
__lowercase = encoding["pixel_values"]
__lowercase = model(pixel_values.to(snake_case_ ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__lowercase = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
__lowercase = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
__lowercase = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
__lowercase = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case_ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case_ ) , atol=1E-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowercase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
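# Example invocation (script name and output folder are illustrative; --model_name must
# be one of the two choices declared above):
#
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub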
| 118 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
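# Minimal sketch (assumptions: onnxruntime-gpu is installed and `model.onnx` is a
# hypothetical local file) of how the provider tuple and session options defined in the
# properties above would be consumed by onnxruntime directly, outside of diffusers:
def _demo_ort_session(model_path="model.onnx"):
    options = ort.SessionOptions()
    # assumption: the flag set to False in the obfuscated gpu_options property above
    options.enable_mem_pattern = False
    providers = [
        ("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"})
    ]
    return ort.InferenceSession(model_path, sess_options=options, providers=providers)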
| 49 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: List[Any] , lowerCAmelCase: List[Any] )-> Dict:
_snake_case : int = 1.5
_snake_case : Union[str, Any] = int(factor * num_class_images )
_snake_case : str = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=snake_case_ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=snake_case_ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
_snake_case : Dict = client.query(text=snake_case_ )
if len(snake_case_ ) >= factor * num_class_images or num_images > 1E4:
break
else:
_snake_case : Dict = int(factor * num_images )
_snake_case : List[str] = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=snake_case_ , aesthetic_weight=0.1 , )
_snake_case : Optional[int] = 0
_snake_case : List[str] = 0
_snake_case : Any = tqdm(desc='downloading real regularization images' , total=snake_case_ )
with open(F"""{class_data_dir}/caption.txt""" , 'w' ) as fa, open(F"""{class_data_dir}/urls.txt""" , 'w' ) as fa, open(
F"""{class_data_dir}/images.txt""" , 'w' ) as fa:
while total < num_class_images:
_snake_case : Optional[int] = class_images[count]
count += 1
try:
_snake_case : List[Any] = requests.get(images['url'] )
if img.status_code == 2_00:
_snake_case : int = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , 'wb' ) as f:
f.write(img.content )
                    fa.write(images['caption'] + '\n' )
                    fb.write(images['url'] + '\n' )
                    fc.write(F"""{class_data_dir}/images/{total}.jpg""" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase_ ( )-> List[str]:
_snake_case : Optional[Any] = argparse.ArgumentParser('' , add_help=snake_case_ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=snake_case_ , type=snake_case_ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=snake_case_ , type=snake_case_ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=snake_case_ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
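# Example invocation (prompt and directory are illustrative):
#
#   python retrieve.py --class_prompt "a photo of a dog" \
#       --class_data_dir ./real_reg/samples_dog --num_class_images 200
#
# The oversampling factor of 1.5 above means roughly 300 candidates are requested up
# front, and the query is retried with a larger factor until enough results survive
# the download loop.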
| 411 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_datasetdict_from_json_features( features , jsonl_path , tmp_path ):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({'''train''': jsonl_path} , features=features , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_datasetdict_from_json_splits( split , jsonl_path , tmp_path ):
    if split:
        path = {split: jsonl_path}
    else:
        split = '''train'''
        path = {'''train''': jsonl_path, '''test''': jsonl_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def load_json( buffer ):
    return json.load(buffer )
def load_json_lines( buffer ):
    return [json.loads(line ) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines( self , lines , load_json_function , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content , list )
        assert isinstance(exported_content[0] , dict )
        assert len(exported_content ) == 10
    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' , [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] , )
    def test_dataset_to_json_orient( self , orient , container , keys , len_at , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content , container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc( self , lines , load_json_function , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=lines , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json_function(buffer )
        assert isinstance(exported_content , list )
        assert isinstance(exported_content[0] , dict )
        assert len(exported_content ) == 10
    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' , [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] , )
    def test_dataset_to_json_orient_multiproc( self , orient , container , keys , len_at , dataset ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , lines=False , orient=orient , num_proc=2 ).write()
            buffer.seek(0 )
            exported_content = load_json(buffer )
        assert isinstance(exported_content , container )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(exported_content ) == 10
    def test_dataset_to_json_orient_invalidproc( self , dataset ):
        with pytest.raises(ValueError ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset , buffer , num_proc=0 )
    @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def test_dataset_to_json_compression( self , shared_datadir , tmp_path_factory , extension , compression , dataset ):
        path = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
        original_path = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(dataset , path , compression=compression ).write()
        with fsspec.open(path , '''rb''' , compression='''infer''' ) as f:
            exported_content = f.read()
        with fsspec.open(original_path , '''rb''' , compression='''infer''' ) as f:
            original_content = f.read()
        assert exported_content == original_content
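# Hedged sketch of the public API these reader/writer tests exercise:
# load_dataset("json", ...) wraps JsonDatasetReader and Dataset.to_json wraps
# JsonDatasetWriter (file names below are illustrative only).
from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl", split="train")
ds.to_json("out.jsonl", lines=True)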
| 49 | 0 |
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A__ ( SchedulerMixin , ConfigMixin ):
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps )

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ):
        return self.config.num_train_timesteps
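# A minimal reverse-diffusion sketch using the VP-SDE scheduler above
# (assumptions: `A__` is the class defined here; the constant "score" stands
# in for a trained score network and the tensor shapes are illustrative).
import torch

scheduler = A__(num_train_timesteps=2000)
scheduler.set_timesteps(num_inference_steps=50)
x = torch.randn(1, 3, 32, 32)  # start from pure Gaussian noise
for t in scheduler.timesteps:
    score = -x  # stand-in: a real model would predict the score here
    x, x_mean = scheduler.step_pred(score, x, t)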
| 691 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
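# Hedged sketch of the streaming generation API these tests cover
# (model/tokenizer names are the standard public gpt2 checkpoint):
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["The quick brown fox"], return_tensors="pt")
model.generate(**inputs, max_new_tokens=20, streamer=TextStreamer(tok))  # tokens print as they are generated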
| 49 | 0 |
def snake_case (density: float , bulk_modulus: float ) -> float:
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
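# Worked example (approximate physical constants, hedged): the speed of sound
# in water, with density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa.
print(snake_case(998, 2.15e9))  # ~1467.8 m/s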
| 57 |
"""simple docstring"""
def lowercase__ ( density :float , bulk_modulus :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
from itertools import product
def total_frequency_distribution( sides_number : int , dice_number : int ):
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ):
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
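# Quick sanity check on the frequency table (small case verified by hand):
freqs = total_frequency_distribution(sides_number=6, dice_number=2)
assert sum(freqs) == 6**2  # 36 equally likely outcomes for two six-sided dice
assert freqs[7] == 6       # a total of 7 is the most frequent (6 of 36 ways)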
| 654 |
"""simple docstring"""
def lowercase__ ( graph :dict ):
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search( graph :dict , vertex :int , visited :set , rec_stk :set ):
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
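# Hedged usage examples for the cycle check above (lowercase__ is the entry point):
assert lowercase__({0: [1], 1: [2], 2: [0]})       # directed 3-cycle
assert not lowercase__({0: [1, 2], 1: [2], 2: []})  # a DAG has no cycle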
| 49 | 0 |
def _UpperCAmelCase ( hex_num : str ):
    """simple docstring"""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
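# Hedged examples (the function returns the binary digits as an int):
assert _UpperCAmelCase("AC") == 10101100   # 0xAC == 172 == 0b10101100
assert _UpperCAmelCase("-0x10") == -10000  # 0x10 == 16 == 0b10000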
| 519 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
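# Runtime behaviour of the lazy pattern above (hedged sketch): importing the
# package stays cheap because _LazyModule defers each submodule import until
# its attribute is first accessed.
import transformers

model_cls = transformers.PoolFormerModel  # modeling_poolformer is imported only here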
| 49 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_lowerCAmelCase = 'pt'
elif is_tf_available():
_lowerCAmelCase = 'tf'
else:
_lowerCAmelCase = 'jax'
class __UpperCamelCase ( _lowerCAmelCase , unittest.TestCase ):
_UpperCAmelCase = ByTaTokenizer
_UpperCAmelCase = False
def __lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __lowerCamelCase ( self ,**_A ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowercase )
def __lowerCamelCase ( self ,_A ,_A=False ,_A=20 ,_A=5 ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = []
for i in range(len(_lowercase ) ):
try:
_lowerCAmelCase : List[str] = tokenizer.decode([i] ,clean_up_tokenization_spaces=_lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCAmelCase : Any = list(filter(lambda _A : re.match(r'^[ a-zA-Z]+$' ,t[1] ) ,_lowercase ) )
_lowerCAmelCase : Any = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=_lowercase ) ,_lowercase ) )
if max_length is not None and len(_lowercase ) > max_length:
_lowerCAmelCase : Dict = toks[:max_length]
if min_length is not None and len(_lowercase ) < min_length and len(_lowercase ) > 0:
while len(_lowercase ) < min_length:
_lowerCAmelCase : Any = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCAmelCase : Tuple = [t[0] for t in toks]
# Ensure consistency
_lowerCAmelCase : Union[str, Any] = tokenizer.decode(_lowercase ,clean_up_tokenization_spaces=_lowercase )
if " " not in output_txt and len(_lowercase ) > 1:
_lowerCAmelCase : Optional[Any] = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=_lowercase )
+ ' '
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=_lowercase )
)
if with_prefix_space:
_lowerCAmelCase : str = ' ' + output_txt
_lowerCAmelCase : List[Any] = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
return output_txt, output_ids
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.ta_base_tokenizer
_lowerCAmelCase : Optional[int] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowerCAmelCase : List[Any] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] ,batch_without_eos_added['input_ids'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.ta_base_tokenizer
_lowerCAmelCase : Any = 'Unicode €.'
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowercase )
_lowerCAmelCase : str = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] ,_lowercase )
# decoding
_lowerCAmelCase : Optional[int] = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase ,'Unicode €.</s>' )
_lowerCAmelCase : Any = tokenizer('e è é ê ë' )
_lowerCAmelCase : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] ,_lowercase )
# decoding
_lowerCAmelCase : Tuple = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase ,'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) ,'e è é ê ë</s>' )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.ta_base_tokenizer
_lowerCAmelCase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowerCAmelCase : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowerCAmelCase : int = tokenizer(_lowercase ,padding=_lowercase ,return_tensors=_lowercase )
self.assertIsInstance(_lowercase ,_lowercase )
if FRAMEWORK != "jax":
_lowerCAmelCase : Tuple = list(batch.input_ids.numpy()[0] )
else:
_lowerCAmelCase : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_lowercase ,_lowercase )
self.assertEqual((2, 37) ,batch.input_ids.shape )
self.assertEqual((2, 37) ,batch.attention_mask.shape )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.ta_base_tokenizer
_lowerCAmelCase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCAmelCase : str = tokenizer(_lowercase ,padding=_lowercase ,return_tensors=_lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' ,_lowercase )
self.assertIn('attention_mask' ,_lowercase )
self.assertNotIn('decoder_input_ids' ,_lowercase )
self.assertNotIn('decoder_attention_mask' ,_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Any = self.ta_base_tokenizer
_lowerCAmelCase : List[str] = [
'Summary of the text.',
'Another summary.',
]
_lowerCAmelCase : List[str] = tokenizer(
text_target=_lowercase ,max_length=32 ,padding='max_length' ,truncation=_lowercase ,return_tensors=_lowercase )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.ta_base_tokenizer
_lowerCAmelCase : int = ['A long paragraph for summarization. </s>']
_lowerCAmelCase : Optional[Any] = ['Summary of the text. </s>']
# fmt: off
_lowerCAmelCase : int = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowerCAmelCase : str = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowerCAmelCase : Any = tokenizer(_lowercase ,text_target=_lowercase )
self.assertEqual(_lowercase ,batch['input_ids'][0] )
self.assertEqual(_lowercase ,batch['labels'][0] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
_lowerCAmelCase : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : Tuple = tempfile.mkdtemp()
_lowerCAmelCase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
_lowerCAmelCase : Dict = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
_lowerCAmelCase : int = tokenizer.__class__.from_pretrained(_lowercase )
_lowerCAmelCase : int = after_tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
shutil.rmtree(_lowercase )
_lowerCAmelCase : Optional[int] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCAmelCase : int = tempfile.mkdtemp()
_lowerCAmelCase : Any = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowerCAmelCase : str = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowerCAmelCase : Dict = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
tokenizer.save_pretrained(_lowercase )
_lowerCAmelCase : Any = tokenizer.__class__.from_pretrained(_lowercase )
_lowerCAmelCase : Any = after_tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
self.assertIn('new_additional_special_token' ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
_lowerCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(_lowercase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowercase )
with open(os.path.join(_lowercase ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file:
_lowerCAmelCase : List[Any] = json.load(_lowercase )
with open(os.path.join(_lowercase ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file:
_lowerCAmelCase : List[Any] = json.load(_lowercase )
_lowerCAmelCase : int = [F"""<extra_id_{i}>""" for i in range(125 )]
_lowerCAmelCase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowerCAmelCase : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_lowercase ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(_lowercase ,_lowercase )
with open(os.path.join(_lowercase ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(_lowercase ,_lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCAmelCase : Union[str, Any] = tokenizer_class.from_pretrained(
_lowercase ,)
self.assertIn(
'an_additional_special_token' ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCAmelCase : Dict = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' ,lstrip=_lowercase )]
_lowerCAmelCase : Dict = tokenizer_class.from_pretrained(
_lowercase ,additional_special_tokens=_lowercase ,)
self.assertIn('a_new_additional_special_token' ,tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) ,)
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowercase )
_lowerCAmelCase : Any = tokenizer_class.from_pretrained(_lowercase )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.get_tokenizers(fast=_lowercase ,do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : Optional[Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_string(_lowercase )
self.assertIsInstance(_lowercase ,_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_lowerCAmelCase : Dict = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(
_lowercase ,skip_special_tokens=_lowercase )
for attr in attributes_list:
setattr(_lowercase ,attr + '_id' ,_lowercase )
self.assertEqual(getattr(_lowercase ,_lowercase ) ,_lowercase )
self.assertEqual(getattr(_lowercase ,attr + '_id' ) ,_lowercase )
setattr(_lowercase ,attr + '_id' ,_lowercase )
self.assertEqual(getattr(_lowercase ,_lowercase ) ,_lowercase )
self.assertEqual(getattr(_lowercase ,attr + '_id' ) ,_lowercase )
setattr(_lowercase ,'additional_special_tokens_ids' ,[] )
self.assertListEqual(getattr(_lowercase ,'additional_special_tokens' ) ,[] )
self.assertListEqual(getattr(_lowercase ,'additional_special_tokens_ids' ) ,[] )
setattr(_lowercase ,'additional_special_tokens_ids' ,[token_id_to_test_setters] )
self.assertListEqual(getattr(_lowercase ,'additional_special_tokens' ) ,[token_to_test_setters] )
self.assertListEqual(getattr(_lowercase ,'additional_special_tokens_ids' ) ,[token_id_to_test_setters] )
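# ByT5 tokenizes raw UTF-8 bytes: each byte maps to id byte + 3 (ids 0-2 are
# reserved for pad/eos/unk), which the fixed offsets in the expected ids above
# reflect. Hedged example:
from transformers import ByT5Tokenizer

tok = ByT5Tokenizer()
assert tok("hi")["input_ids"] == [104 + 3, 105 + 3, 1]  # 'h', 'i', </s>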
| 259 |
"""simple docstring"""
def compute_ap( l ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
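# For the adjacency list above this prints the articulation points 2, 3 and 5:
# removing 2 splits off the {0, 1} triangle, removing 3 isolates vertex 4, and
# removing 5 disconnects the {6, 7, 8} cycle from the rest of the graph.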
| 49 | 0 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
A = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = XLMRobertaTokenizer
lowerCAmelCase__ : List[Any] = XLMRobertaTokenizerFast
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Optional[int] = True
def _lowerCamelCase ( self : List[str] ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Optional[Any] = XLMRobertaTokenizer(_lowercase ,keep_accents=_lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : List[Any] ) -> str:
_lowercase : Dict = '<pad>'
_lowercase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) ,_lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) ,_lowercase )
def _lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_lowercase ) ,1002 )
def _lowerCamelCase ( self : Any ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size ,1002 )
def _lowerCamelCase ( self : Dict ) -> Optional[int]:
_lowercase : int = XLMRobertaTokenizer(_lowercase ,keep_accents=_lowercase )
_lowercase : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(_lowercase ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
_lowercase : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_lowercase ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
_lowercase : str = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] ,)
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
def _lowerCamelCase ( self : int ) -> int:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_lowercase : str = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowercase ,**_lowercase )
_lowercase : Dict = self.tokenizer_class.from_pretrained(_lowercase ,**_lowercase )
_lowercase : Optional[int] = tempfile.mkdtemp()
_lowercase : int = tokenizer_r.save_pretrained(_lowercase )
_lowercase : int = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
_lowercase : List[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_lowercase ,_lowercase )
# Checks everything loads correctly in the same way
_lowercase : Optional[Any] = tokenizer_r.from_pretrained(_lowercase )
_lowercase : Dict = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase ,_lowercase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
_lowercase : List[str] = tempfile.mkdtemp()
_lowercase : Tuple = tokenizer_r.save_pretrained(_lowercase ,legacy_format=_lowercase )
_lowercase : Optional[Any] = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files
self.assertSequenceEqual(_lowercase ,_lowercase )
# Checks everything loads correctly in the same way
_lowercase : Union[str, Any] = tokenizer_r.from_pretrained(_lowercase )
_lowercase : List[Any] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase ,_lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
_lowercase : Tuple = tempfile.mkdtemp()
_lowercase : Tuple = tokenizer_r.save_pretrained(_lowercase ,legacy_format=_lowercase )
_lowercase : Dict = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_lowercase : Any = tokenizer_r.from_pretrained(_lowercase )
_lowercase : Tuple = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase ,_lowercase ) )
shutil.rmtree(_lowercase )
@cached_property
def _lowerCamelCase ( self : Dict ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def _lowerCamelCase ( self : List[Any] ) -> Dict:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_lowercase ,f.name )
_lowercase : Any = XLMRobertaTokenizer(f.name ,keep_accents=_lowercase )
_lowercase : Dict = pickle.dumps(_lowercase )
pickle.loads(_lowercase )
def _lowerCamelCase ( self : List[str] ) -> int:
if not self.test_rust_tokenizer:
return
_lowercase : Tuple = self.get_tokenizer()
_lowercase : Dict = self.get_rust_tokenizer()
_lowercase : List[Any] = 'I was born in 92000, and this is falsé.'
_lowercase : Union[str, Any] = tokenizer.tokenize(_lowercase )
_lowercase : Optional[int] = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
_lowercase : List[str] = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
_lowercase : Any = rust_tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : Any = tokenizer.encode(_lowercase )
_lowercase : Union[str, Any] = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
@slow
def _lowerCamelCase ( self : Optional[int] ) -> Tuple:
_lowercase : int = 'Hello World!'
_lowercase : Optional[Any] = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowercase ,self.big_tokenizer.encode(_lowercase ) )
@slow
def _lowerCamelCase ( self : str ) -> Union[str, Any]:
_lowercase : Dict = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
_lowercase : Optional[Any] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_lowercase ,self.big_tokenizer.encode(_lowercase ) )
@slow
def _lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
# fmt: off
_lowercase : Dict = {'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase ,model_name='xlm-roberta-base' ,revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' ,)
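# Hedged check mirroring the slow "Hello World!" test above against the real
# public checkpoint:
from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
assert tok.encode("Hello World!") == [0, 35378, 6661, 38, 2]  # <s> ... </s>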
| 125 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( ProcessorMixin ):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )

    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['''input_values'''] = audio_inputs['''input_values''']
            if "padding_mask" in audio_inputs:
                inputs['''padding_mask'''] = audio_inputs['''padding_mask''']
            return inputs

    def batch_decode( self , *args , **kwargs ):
        audio_values = kwargs.pop('''audio''' , None )
        padding_mask = kwargs.pop('''padding_mask''' , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    def _decode_audio( self , audio_values , padding_mask=None ):
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values )
        padding_mask = to_numpy(padding_mask )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , '''constant''' , constant_values=padding_value )
        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )
        return audio_values
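# Hedged usage sketch: this processor pairs an EncodecFeatureExtractor with a
# T5 tokenizer, as in the MusicGen checkpoints (checkpoint name assumed to be
# available on the Hub):
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")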
| 49 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=[30, 30] , lowerCAmelCase_=2 , lowerCAmelCase_=3 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=10 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=None , lowerCAmelCase_=8 , lowerCAmelCase_=10 , ) -> Tuple:
_A = parent
_A = batch_size
_A = image_size
_A = patch_size
_A = num_channels
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = scope
_A = n_targets
_A = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
_A = (image_size[1] // patch_size) * (image_size[0] // patch_size)
_A = num_patches + 1 + self.num_detection_tokens
def UpperCAmelCase ( self ) -> str:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_A = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_A = []
for i in range(self.batch_size ):
_A = {}
_A = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_lowercase )
_A = torch.rand(self.n_targets , 4 , device=_lowercase )
labels.append(_lowercase )
_A = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self ) -> Tuple:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = YolosModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_A = model(_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = YolosForObjectDetection(_lowercase )
model.to(_lowercase )
model.eval()
_A = model(pixel_values=_lowercase )
_A = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
_A = model(pixel_values=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
_A , _A , _A = config_and_inputs
_A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :str = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCamelCase :Optional[Any] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
lowerCamelCase :List[Any] = False
lowerCamelCase :Any = False
lowerCamelCase :int = False
lowerCamelCase :List[Any] = False
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[Any]:
_A = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_A = []
for i in range(self.model_tester.batch_size ):
_A = {}
_A = torch.ones(
size=(self.model_tester.n_targets,) , device=_lowercase , dtype=torch.long )
_A = torch.ones(
self.model_tester.n_targets , 4 , device=_lowercase , dtype=torch.float )
labels.append(_lowercase )
_A = labels
return inputs_dict
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = YolosModelTester(self )
_A = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def UpperCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Optional[int]:
# YOLOS does not use inputs_embeds
pass
def UpperCAmelCase ( self ) -> Tuple:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def UpperCAmelCase ( self ) -> int:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_lowercase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCAmelCase ( self ) -> Tuple:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase ( self ) -> int:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
# in YOLOS, the seq_len is different
_A = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_A = True
_A = False
_A = True
_A = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_A = outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_A = True
_A = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_A = outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_A = len(_lowercase )
# Check attention is always last and order is fine
_A = True
_A = True
_A = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_A = 1
self.assertEqual(out_len + added_hidden_states , len(_lowercase ) )
_A = outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCAmelCase ( self ) -> Any:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_A = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_A = outputs.hidden_states
_A = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
# YOLOS has a different seq_length
_A = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_lowercase )
@slow
def UpperCAmelCase ( self ) -> Tuple:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = YolosModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def snake_case ( ) -> List[str]:
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Dict:
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ) -> Dict:
_A = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(_lowercase )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
# forward pass
with torch.no_grad():
_A = model(inputs.pixel_values )
# verify outputs
_A = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , _lowercase )
_A = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_lowercase , )
_A = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowercase , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _lowercase , atol=1E-4 ) )
# verify postprocessing
_A = image_processor.post_process_object_detection(
_lowercase , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
_A = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(_lowercase )
_A = [75, 75, 17, 63, 17]
_A = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(_lowercase )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , _lowercase , atol=1E-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , _lowercase )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , _lowercase ) )
| 401 |
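# Companion sketch (my naming, not the original test's): the attention checks
# above assert that a ViT/YOLOS-style encoder returns one attention map per
# layer, each shaped (batch, num_heads, seq_len, seq_len). Dummy tensors stand
# in for real model outputs, so this runs with torch alone.
import torch

num_layers, num_heads, seq_len = 4, 2, 10
attentions = [torch.rand(1, num_heads, seq_len, seq_len) for _ in range(num_layers)]
assert len(attentions) == num_layers
assert list(attentions[0].shape[-3:]) == [num_heads, seq_len, seq_len]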
"""simple docstring"""
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__UpperCAmelCase = True
for i in range(snake_case_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__UpperCAmelCase = True
if a[i].islower():
__UpperCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
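# The snippet above appears to be the classic "abbreviation match" DP with its
# left-hand sides destroyed by the renaming (the two assignments inside the
# loop are normally dp[i + 1][j + 1] and dp[i + 1][j]). A readable
# reconstruction under that assumption, with names of my own choosing:
def matches_abbreviation(a: str, b: str) -> bool:
    """True when `b` can be formed from `a` by upper-casing some letters
    and deleting the remaining lowercase ones."""
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # consume b[j] by upper-casing a[i]
                if a[i].islower():
                    dp[i + 1][j] = True  # delete the lowercase a[i]
    return dp[n][m]

assert matches_abbreviation("daBcd", "ABC") is True
assert matches_abbreviation("dBcd", "ABC") is False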
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
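# De-obfuscated shape of the lazy-import boilerplate above: the dict created on
# the first line is conventionally named `_import_structure`, which the final
# `_LazyModule(...)` call refers to. A trimmed sketch of the canonical pattern
# (requires transformers; only the sentencepiece branch is shown):
import sys
from typing import TYPE_CHECKING
from transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

if not TYPE_CHECKING:
    # replace this module with a proxy that imports submodules on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)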
"""simple docstring"""
from collections import deque
class _UpperCAmelCase :
def __init__( self : List[Any] , _lowercase : str , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = process_name # process name
__UpperCAmelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__UpperCAmelCase = arrival_time
__UpperCAmelCase = burst_time # remaining burst time
__UpperCAmelCase = 0 # total time of the process wait in ready queue
__UpperCAmelCase = 0 # time from arrival time to completion time
class _UpperCAmelCase :
def __init__( self : List[str] , _lowercase : int , _lowercase : list[int] , _lowercase : deque[Process] , _lowercase : int , ):
# total number of mlfq's queues
__UpperCAmelCase = number_of_queues
# time slice of queues that round robin algorithm applied
__UpperCAmelCase = time_slices
# unfinished process is in this ready_queue
__UpperCAmelCase = queue
# current time
__UpperCAmelCase = current_time
# finished process is in this sequence queue
__UpperCAmelCase = deque()
def a ( self : Dict ):
__UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a ( self : str , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a ( self : Any , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a ( self : Tuple , _lowercase : list[Process] ):
__UpperCAmelCase = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a ( self : Optional[int] , _lowercase : deque[Process] ):
return [q.burst_time for q in queue]
def a ( self : str , _lowercase : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a ( self : Union[str, Any] , _lowercase : deque[Process] ):
__UpperCAmelCase = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__UpperCAmelCase = 0
# set the process's turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# set the completion time
__UpperCAmelCase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a ( self : Union[str, Any] , _lowercase : deque[Process] , _lowercase : int ):
__UpperCAmelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowercase ) ):
__UpperCAmelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__UpperCAmelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__UpperCAmelCase = 0
# set the finish time
__UpperCAmelCase = self.current_time
# update the process' turnaround time because it is finished
__UpperCAmelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a ( self : Union[str, Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__UpperCAmelCase , __UpperCAmelCase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 49 | 0 |
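# A compact, self-contained illustration (names are mine) of the single
# round-robin pass the MLFQ class performs per queue: each process gets at
# most `time_slice` units of CPU; unfinished ones rejoin the back of the queue.
from collections import deque

def round_robin_pass(bursts: deque, time_slice: int, now: int = 0):
    finished = []
    for _ in range(len(bursts)):
        name, remaining = bursts.popleft()
        if remaining > time_slice:
            now += time_slice
            bursts.append((name, remaining - time_slice))  # back of the queue
        else:
            now += remaining
            finished.append((name, now))  # (process, completion time)
    return finished, bursts, now

done, pending, t = round_robin_pass(deque([("P1", 53), ("P2", 17), ("P3", 68), ("P4", 24)]), 17)
assert done == [("P2", 34)] and t == 68
assert list(pending) == [("P1", 36), ("P3", 51), ("P4", 7)]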
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __magic_name__( self :Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
__SCREAMING_SNAKE_CASE : Tuple = 3
__SCREAMING_SNAKE_CASE : str = (32, 32)
__SCREAMING_SNAKE_CASE : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
return image
@property
def __magic_name__( self :int ) -> List[str]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __magic_name__( self :Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __magic_name__( self :List[str] ) -> str:
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Tuple = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowercase )
@property
def __magic_name__( self :str ) -> Optional[int]:
def extract(*lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ):
class _lowercase :
'''simple docstring'''
def __init__( self :Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones([0] )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Tuple ) -> List[str]:
self.pixel_values.to(_lowercase )
return self
return Out()
return extract
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE : List[str] = self.dummy_cond_unet
__SCREAMING_SNAKE_CASE : int = PNDMScheduler(skip_prk_steps=_lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_vae
__SCREAMING_SNAKE_CASE : str = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : Union[str, Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
__SCREAMING_SNAKE_CASE : Dict = 77
__SCREAMING_SNAKE_CASE : int = self.dummy_image.to(_lowercase )
__SCREAMING_SNAKE_CASE : Any = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
__SCREAMING_SNAKE_CASE : Optional[int] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
__SCREAMING_SNAKE_CASE : int = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE : str = alt_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_lowercase , )
__SCREAMING_SNAKE_CASE : Any = output.images
__SCREAMING_SNAKE_CASE : str = torch.Generator(device=_lowercase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = alt_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=_lowercase , return_dict=_lowercase , )[0]
__SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE : int = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __magic_name__( self :int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Tuple = self.dummy_cond_unet
__SCREAMING_SNAKE_CASE : List[Any] = PNDMScheduler(skip_prk_steps=_lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_vae
__SCREAMING_SNAKE_CASE : str = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE : Dict = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = 77
__SCREAMING_SNAKE_CASE : Tuple = self.dummy_image.to(_lowercase )
# put models in fp16
__SCREAMING_SNAKE_CASE : Dict = unet.half()
__SCREAMING_SNAKE_CASE : str = vae.half()
__SCREAMING_SNAKE_CASE : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE : Dict = AltDiffusionImgaImgPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
__SCREAMING_SNAKE_CASE : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
__SCREAMING_SNAKE_CASE : List[str] = '''A painting of a squirrel eating a burger'''
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = alt_pipe(
[prompt] , generator=_lowercase , num_inference_steps=2 , output_type='''np''' , image=_lowercase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
__SCREAMING_SNAKE_CASE : List[Any] = init_image.resize((760, 504) )
__SCREAMING_SNAKE_CASE : Any = '''BAAI/AltDiffusion'''
__SCREAMING_SNAKE_CASE : Dict = AltDiffusionImgaImgPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''A fantasy landscape, trending on artstation'''
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Tuple = pipe(
prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type='''np''' , )
__SCREAMING_SNAKE_CASE : int = output.images[0]
__SCREAMING_SNAKE_CASE : List[Any] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
__SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Dict ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__( self :Tuple ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__SCREAMING_SNAKE_CASE : str = init_image.resize((768, 512) )
__SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
__SCREAMING_SNAKE_CASE : List[Any] = '''BAAI/AltDiffusion'''
__SCREAMING_SNAKE_CASE : Any = AltDiffusionImgaImgPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE : Tuple = '''A fantasy landscape, trending on artstation'''
__SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipe(
prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type='''np''' , )
__SCREAMING_SNAKE_CASE : List[Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 696 |
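# The image-to-image tests above lean on seeded torch generators so that two
# runs with the same seed produce identical latents. The pattern in isolation:
import torch

g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(3, generator=g1), torch.randn(3, generator=g2))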
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "camembert"
def __init__( self : Union[str, Any] , _lowercase : Any=3_05_22 , _lowercase : Any=7_68 , _lowercase : Union[str, Any]=12 , _lowercase : List[str]=12 , _lowercase : int=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : int=5_12 , _lowercase : Optional[Any]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Optional[int]=1 , _lowercase : Optional[Any]=0 , _lowercase : Tuple=2 , _lowercase : List[Any]="absolute" , _lowercase : List[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
__UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( _lowerCAmelCase ):
@property
def a ( self : Tuple ):
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 49 | 0 |
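# The `inputs` property above declares which axes of each ONNX input are
# dynamic. Stdlib-only illustration of the mapping it returns for the plain
# (non multiple-choice) case:
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
assert onnx_inputs["input_ids"][1] == "sequence"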
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) ->Optional[int]:
# Load configuration defined in the metadata file
with open(snake_case_ ) as metadata_file:
__lowercase = json.load(snake_case_ )
__lowercase = LukeConfig(use_entity_aware_attention=snake_case_ , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
__lowercase = torch.load(snake_case_ , map_location="cpu" )
# Load the entity vocab file
__lowercase = load_entity_vocab(snake_case_ )
__lowercase = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
__lowercase = AddedToken("<ent>" , lstrip=snake_case_ , rstrip=snake_case_ )
__lowercase = AddedToken("<ent2>" , lstrip=snake_case_ , rstrip=snake_case_ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(snake_case_ )
with open(os.path.join(snake_case_ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(snake_case_ , snake_case_ )
__lowercase = LukeTokenizer.from_pretrained(snake_case_ )
# Initialize the embeddings of the special tokens
__lowercase = state_dict["embeddings.word_embeddings.weight"]
__lowercase = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 )
__lowercase = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 )
__lowercase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowercase = F'''encoder.layer.{layer_index}.attention.self.'''
__lowercase = state_dict[prefix + matrix_name]
__lowercase = state_dict[prefix + matrix_name]
__lowercase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowercase = state_dict["entity_embeddings.entity_embeddings.weight"]
__lowercase = entity_emb[entity_vocab["[MASK]"]]
__lowercase = LukeModel(config=snake_case_ ).eval()
__lowercase , __lowercase = model.load_state_dict(snake_case_ , strict=snake_case_ )
if not (len(snake_case_ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {', '.join(snake_case_ )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )):
raise ValueError(
"Unexpected keys"
F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
# Check outputs
__lowercase = LukeTokenizer.from_pretrained(snake_case_ , task="entity_classification" )
__lowercase = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
" new world number one avoid a humiliating second- round exit at Wimbledon ."
)
__lowercase = (3_9, 4_2)
__lowercase = tokenizer(snake_case_ , entity_spans=[span] , add_prefix_space=snake_case_ , return_tensors="pt" )
__lowercase = model(**snake_case_ )
# Verify word hidden states
if model_size == "large":
__lowercase = torch.Size((1, 4_2, 1_0_2_4) )
__lowercase = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
__lowercase = torch.Size((1, 4_2, 7_6_8) )
__lowercase = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
__lowercase = torch.Size((1, 1, 1_0_2_4) )
__lowercase = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
__lowercase = torch.Size((1, 1, 7_6_8) )
__lowercase = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case_ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(snake_case_ ) )
model.save_pretrained(snake_case_ )
def lowerCAmelCase__ ( __magic_name__ ) ->Dict:
__lowercase = {}
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(snake_case_ ):
__lowercase , __lowercase = line.rstrip().split("\t" )
__lowercase = index
return entity_vocab
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
_lowercase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 118 |
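# Readable version of the TSV entity-vocab loader at the end of the script
# (its parameter and local names were destroyed by the renaming): each line is
# "<entity>\t<count>" and the entity maps to its zero-based line index.
def load_entity_vocab(entity_vocab_path: str) -> dict:
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            entity, _count = line.rstrip().split("\t")
            entity_vocab[entity] = index
    return entity_vocab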
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks if the entire collection has been sorted
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ , n - 1 )
rec_insertion_sort(snake_case_ , n - 1 )
def lowercase__ ( snake_case_ :list , snake_case_ :int ):
# Checks order between adjacent elements
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
__UpperCAmelCase , __UpperCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ , index + 1 )
if __name__ == "__main__":
_lowercase : Any = input('Enter integers separated by spaces: ')
_lowercase : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 49 | 0 |
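# Reconstruction of the recursive insertion sort above with the obfuscated
# left-hand sides restored (a guess consistent with the surrounding comments):
def rec_insertion_sort(collection: list, n: int) -> None:
    if len(collection) <= 1 or n <= 1:  # nothing left to sort
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)

def insert_next(collection: list, index: int) -> None:
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # swap adjacent out-of-order elements, then bubble onward
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)

nums = [5, 3, 1, 4, 2]
rec_insertion_sort(nums, len(nums))
assert nums == [1, 2, 3, 4, 5]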
import numpy
class _lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase : numpy.ndarray , UpperCamelCase : numpy.ndarray ):
'''simple docstring'''
_snake_case : Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_snake_case : Tuple = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_snake_case : Any = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_snake_case : Optional[int] = numpy.random.rand(3 , 1 )
# Real output values provided.
_snake_case : Optional[Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_snake_case : str = numpy.zeros(output_array.shape )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_snake_case : int = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_snake_case : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Tuple = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_snake_case : Dict = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_snake_case : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : numpy.ndarray , UpperCamelCase : int , UpperCamelCase : bool ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
_snake_case : str = self.feedforward()
self.back_propagation()
if give_loss:
_snake_case : Optional[Any] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : numpy.ndarray ):
'''simple docstring'''
_snake_case : Optional[int] = input_arr
_snake_case : int = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_snake_case : Any = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_snake_case : int = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCamelCase_ ( lowerCAmelCase: numpy.ndarray )-> int:
return 1 / (1 + numpy.exp(-value ))
def lowerCamelCase_ ( lowerCAmelCase: numpy.ndarray )-> Tuple:
return (value) * (1 - (value))
def lowerCamelCase_ ( )-> Optional[int]:
_snake_case : List[Any] = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_snake_case : Tuple = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_snake_case : Tuple = TwoHiddenLayerNeuralNetwork(
input_array=snake_case_ , output_array=snake_case_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=snake_case_ , iterations=10 , give_loss=snake_case_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 411 |
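# The two helpers at the bottom compute the logistic sigmoid and its
# derivative expressed in terms of the activation value: if s = sigmoid(x)
# then d/dx sigmoid(x) = s * (1 - s), which is what back_propagation uses.
import numpy

s = 1 / (1 + numpy.exp(-0.5))  # sigmoid(0.5) ~= 0.6225
ds = s * (1 - s)               # derivative at x = 0.5 ~= 0.2350
assert abs(s - 0.6225) < 1e-4 and abs(ds - 0.2350) < 1e-4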
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
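# note: the misspelled prompt "anime turle" below is kept as-is; the reference
# image loaded above was presumably generated with this exact string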
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
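# Pattern used above to bound peak GPU memory in a test: reset the CUDA
# counters, run the workload, then read max_memory_allocated(). Sketch
# (CUDA-only; skipped on CPU machines):
import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    x = torch.randn(1024, 1024, device="cuda")
    assert torch.cuda.max_memory_allocated() >= x.numel() * x.element_size()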
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( _lowerCAmelCase ):
UpperCAmelCase = (EulerDiscreteScheduler,)
UpperCAmelCase = 10
def __UpperCamelCase ( self : List[Any] , **_a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1100,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_lowercase )
return config
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_lowercase )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =self.dummy_model()
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter * scheduler.init_noise_sigma
_SCREAMING_SNAKE_CASE =sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
_SCREAMING_SNAKE_CASE =scheduler.scale_model_input(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =model(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
_SCREAMING_SNAKE_CASE =output.prev_sample
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(_lowercase ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config(prediction_type='''v_prediction''' )
_SCREAMING_SNAKE_CASE =scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =self.dummy_model()
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter * scheduler.init_noise_sigma
_SCREAMING_SNAKE_CASE =sample.to(_lowercase )
for i, t in enumerate(scheduler.timesteps ):
_SCREAMING_SNAKE_CASE =scheduler.scale_model_input(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =model(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
_SCREAMING_SNAKE_CASE =output.prev_sample
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(_lowercase ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 0.00_02 ) < 1E-2
assert abs(result_mean.item() - 2.26_76E-06 ) < 1E-3
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowercase )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =self.dummy_model()
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_SCREAMING_SNAKE_CASE =sample.to(_lowercase )
for t in scheduler.timesteps:
_SCREAMING_SNAKE_CASE =scheduler.scale_model_input(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =model(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
_SCREAMING_SNAKE_CASE =output.prev_sample
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(_lowercase ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.scheduler_classes[0]
_SCREAMING_SNAKE_CASE =self.get_scheduler_config()
_SCREAMING_SNAKE_CASE =scheduler_class(**_lowercase , use_karras_sigmas=_lowercase )
scheduler.set_timesteps(self.num_inference_steps , device=_lowercase )
_SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =self.dummy_model()
_SCREAMING_SNAKE_CASE =self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_SCREAMING_SNAKE_CASE =sample.to(_lowercase )
for t in scheduler.timesteps:
_SCREAMING_SNAKE_CASE =scheduler.scale_model_input(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =model(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase )
_SCREAMING_SNAKE_CASE =output.prev_sample
_SCREAMING_SNAKE_CASE =torch.sum(torch.abs(_lowercase ) )
_SCREAMING_SNAKE_CASE =torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
| 691 |
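# The loop these scheduler tests exercise, in minimal form: scale the sample
# for the current sigma, predict noise (a zero tensor stands in for a real
# UNet here), and step the scheduler. Requires diffusers.
import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a model call
    sample = scheduler.step(noise_pred, t, sample).prev_sample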
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :str , snake_case_ :type , snake_case_ :bool = False ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a dict'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object ):
__UpperCAmelCase = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case_ )
if not all(isinstance(snake_case_ , snake_case_ ) for x in _object.values() ):
__UpperCAmelCase = '''nested dictionary ''' if nested else ''''''
__UpperCAmelCase = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
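# A tiny worked input for the Viterbi implementation above (the classic
# healthy/fever HMM). Note that the renaming collapsed every function in the
# module onto one name, so the helper calls in the bodies (_validation,
# _validate_dict, ...) show the original identifiers; the entry point takes
# (observations, states, initial, transition, emission) in that order.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# expected most likely state sequence: ["Healthy", "Healthy", "Fever"]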
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Any = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class _lowerCAmelCase( _lowerCAmelCase ):
"""simple docstring"""
a : Optional[Any] ="ibert"
def __init__( self , _lowerCamelCase=3_0_5_2_2 , _lowerCamelCase=7_6_8 , _lowerCamelCase=1_2 , _lowerCamelCase=1_2 , _lowerCamelCase=3_0_7_2 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase="absolute" , _lowerCamelCase=False , _lowerCamelCase="none" , **_lowerCamelCase , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
UpperCamelCase_: Dict = vocab_size
UpperCamelCase_: int = hidden_size
UpperCamelCase_: Union[str, Any] = num_hidden_layers
UpperCamelCase_: int = num_attention_heads
UpperCamelCase_: Any = hidden_act
UpperCamelCase_: str = intermediate_size
UpperCamelCase_: Optional[Any] = hidden_dropout_prob
UpperCamelCase_: Optional[int] = attention_probs_dropout_prob
UpperCamelCase_: Any = max_position_embeddings
UpperCamelCase_: Optional[Any] = type_vocab_size
UpperCamelCase_: Optional[Any] = initializer_range
UpperCamelCase_: int = layer_norm_eps
UpperCamelCase_: List[str] = position_embedding_type
UpperCamelCase_: Optional[int] = quant_mode
UpperCamelCase_: Any = force_dequant
class _lowerCAmelCase( _lowerCAmelCase ):
"""simple docstring"""
@property
def _a ( self ):
if self.task == "multiple-choice":
UpperCamelCase_: int = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase_: int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 57 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 0 |
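# The two helpers at the end implement the standard BERT pair layout:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment
# (including [CLS] and its [SEP]) and 1 over the second. Illustration with
# made-up ids:
cls_id, sep_id = 101, 102
a, b = [7, 8], [9]
input_ids = [cls_id] + a + [sep_id] + b + [sep_id]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert input_ids == [101, 7, 8, 102, 9, 102]
assert token_type_ids == [0, 0, 0, 0, 1, 1]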
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a__ = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowercase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowercase : Union[str, Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowercase : str = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __magic_name__ ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str):
'''simple docstring'''
snake_case__ = ZeroShotClassificationPipeline(
model=_lowercase , tokenizer=_lowercase , candidate_labels=["""polics""", """health"""])
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , _ ):
        '''simple docstring'''
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""")
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        # No kwarg
        outputs = classifier("""Who are you voting for in 2020?""" , ["""politics"""])
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""])
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""")
        self.assertEqual(
            outputs , {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""])) , 1.0)
        outputs = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""])
        self.assertEqual(
            outputs , {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]})
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""])) , 1.0)
        outputs = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""")
        self.assertEqual(outputs , {"""sequence""": ANY(str), """labels""": [ANY(str)], """scores""": [ANY(float)]})
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["""I am happy"""] , ["""positive""", """negative"""])
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]}
                for i in range(1)
            ] , )
        outputs = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""])
        self.assertEqual(
            outputs , [
                {"""sequence""": ANY(str), """labels""": [ANY(str), ANY(str)], """scores""": [ANY(float), ANY(float)]}
                for i in range(2)
            ] , )
        with self.assertRaises(ValueError):
            classifier("""""" , candidate_labels="""politics""")
        with self.assertRaises(TypeError):
            classifier(None , candidate_labels="""politics""")
        with self.assertRaises(ValueError):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""")
        with self.assertRaises(TypeError):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
        with self.assertRaises(AttributeError):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=None , )
        self.run_entailment_id(classifier)
    def run_entailment_id( self , zero_shot_classifier: Pipeline):
        '''simple docstring'''
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1)
        config.label2id = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0)
        config.label2id = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0)
        config.label2id = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2)
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation( self ):
        '''simple docstring'''
        zero_shot_classifier = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 1_0_0 , candidate_labels=["""politics""", """public health""", """science"""])
@require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        zero_shot_classifier = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
    def test_small_model_tf( self ):
        '''simple docstring'''
        zero_shot_classifier = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
    def test_large_model_pt( self ):
        '''simple docstring'''
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""")
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=_lowercase , )
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf( self ):
        '''simple docstring'''
        zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""")
        outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""])
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=_lowercase , )
self.assertEqual(
            nested_simplify(outputs) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 654 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
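# Cached loader for the dense question retriever (RetriBERT) and the BART answer generator.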
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
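# Cached loader for the wiki40b passage representations (FAISS index on GPU) and the ElasticSearch client.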
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
        wiki40b_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128 )
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res , 1 , wiki40b_index_flat )
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps ) # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
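# Cached loader for the ELI5 training questions and their dense index (used for nearest-neighbor lookup).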
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
    eli5_train = eli5['''train_eli5''']
    eli5_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(eli5_train_q_reps )
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
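# Embed the query and return the most similar ELI5 training examples.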
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D, I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [eli5_train[int(i )] for i in I[0]]
    return nn_examples
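# Retrieve supporting passages (dense, sparse, or none) and build the seq2seq input string.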
def make_support(question , source='''wiki40b''' , method='''dense''' , n_results=10 ):
    if source == "none":
        support_doc, hit_lst = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wiki40b_passages , wiki40b_gpu_index_flat , n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question , es_client , index_name='''english_wiki40b_snippets_100w''' , n_results=n_results , )
    support_list = [
        (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
    ]
    question_doc = '''question: {} context: {}'''.format(question , support_doc )
    return question_doc, support_list
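# Generate a long-form answer with the seq2seq model; the custom hash_funcs keep streamlit's
# cache from trying to hash model tensors and the tokenizer.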
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda tensor: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda tokenizer: None),
    } )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.95 , temp=0.8 ):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , sas_model , sas_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device='''cuda:0''' , )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class="img-container"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
'',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    '
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 2_56
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder\'s output probabilities.\n    '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
        temp = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
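# Copy the fairseq X-MOD weights into a freshly initialized Hugging Face Xmod model,
# then compare the two models' outputs on a sample sentence before saving.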
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ):
    """simple docstring"""
    data_dir = Path("""data_bin""" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(data_dir ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
    xmod.eval() # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our X-MOD config:""" , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["""mnli"""].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["""mnli"""].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["""mnli"""].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["""mnli"""].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads["""mnli"""](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 519 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
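    # Builds a tiny UNet/VAE/CLIP stack so the pipeline can be exercised quickly on CPU.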
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_cycle( self ):
        device = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_cycle_fp16( self ):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , '''half''' ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
    def test_save_load_local( self ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
    def test_inference_batch_single_identical( self ):
return super().test_inference_batch_single_identical()
@skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_optional_components( self ):
return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass( self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
        init_image = init_image.resize((5_12, 5_12) )
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision='''fp16''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='''np''' , )
        image = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
    def test_cycle_diffusion_pipeline( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
        init_image = init_image.resize((5_12, 5_12) )
        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='''scheduler''' )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='''np''' , )
        image = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
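# VQModel pairs a convolutional Encoder/Decoder with a VectorQuantizer bottleneck:
# encode() maps images to latents, decode() quantizes them (unless told not to) and reconstructs.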
@dataclass
class VQEncoderOutput(BaseOutput ):
    latents: torch.FloatTensor
class VQModel(ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 3 , sample_size = 32 , num_vq_embeddings = 256 , norm_num_groups = 32 , vq_embed_dim = None , scaling_factor = 0.1_8_2_1_5 , norm_type = "group" ,):
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels ,out_channels=latent_channels ,down_block_types=down_block_types ,block_out_channels=block_out_channels ,layers_per_block=layers_per_block ,act_fn=act_fn ,norm_num_groups=norm_num_groups ,double_z=False ,)
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels ,vq_embed_dim ,1 )
        self.quantize = VectorQuantizer(num_vq_embeddings ,vq_embed_dim ,beta=0.2_5 ,remap=None ,sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim ,latent_channels ,1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels ,out_channels=out_channels ,up_block_types=up_block_types ,block_out_channels=block_out_channels ,layers_per_block=layers_per_block ,act_fn=act_fn ,norm_num_groups=norm_num_groups ,norm_type=norm_type ,)
@apply_forward_hook
    def encode( self ,x ,return_dict = True ):
        '''simple docstring'''
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self ,h ,force_not_quantize = False ,return_dict = True ):
        '''simple docstring'''
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 ,quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self ,sample ,return_dict = True ):
        '''simple docstring'''
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
| 259 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/rembert': 2_56,
}
class RemBertTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
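    # SentencePiece-backed slow tokenizer: the vocab file is loaded into an in-process
    # SentencePieceProcessor below.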
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
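# Minimal usage sketch, assuming the "google/rembert" checkpoint mapped above is available:
# tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
# ids = tokenizer.encode("Hello world")  # -> [CLS] ... [SEP] via build_inputs_with_special_tokens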
| 49 | 0 |
'''simple docstring'''
from __future__ import annotations
class Node:
    '''simple docstring'''

    def __init__( self , data=None ):
        self.data = data
        self.next = None

    def __repr__( self ):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F'''{temp.data}''' )
            temp = temp.next
        return "->".join(string_rep )
def make_linked_list(elements_list: list):
    '''simple docstring'''
    if not elements_list:
        raise Exception('The Elements List is empty')
    current = head = Node(elements_list[0])
    for i in range(1 , len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node: Node):
    '''simple docstring'''
    if head_node is not None and isinstance(head_node , Node):
        print_reverse(head_node.next)
        print(head_node.data)
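# Minimal usage sketch: make_linked_list([1, 2, 3]) returns a head whose repr is
# "1->2->3", and print_reverse(head) prints 3, 2, 1 (one element per line).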
def main():
    '''simple docstring'''
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 125 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase ):
"""simple docstring"""
@require_torch
    def test_small_model_pt( self ):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""a""", """b""", """c"""] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}],
                [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """c"""}, {"""score""": 0.333, """label""": """b"""}],
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
            ] , )
@require_tf
    def test_small_model_tf( self ):
        image_classifier = pipeline(
            model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""a""", """b""", """c"""] )
        self.assertEqual(
            nested_simplify(output ) , [{"""score""": 0.333, """label""": """a"""}, {"""score""": 0.333, """label""": """b"""}, {"""score""": 0.333, """label""": """c"""}] , )
        output = image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
                [
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                    {"""score""": 0.333, """label""": ANY(str )},
                ],
            ] , )
@slow
@require_torch
    def test_large_model_pt( self ):
        image_classifier = pipeline(
            task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        output = image_classifier(image , candidate_labels=["""cat""", """plane""", """remote"""] )
        self.assertEqual(
            nested_simplify(output ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
        output = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_A = image_classifier(_lowercase , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
] , )
_A = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[
{"""score""": 0.511, """label""": """remote"""},
{"""score""": 0.485, """label""": """cat"""},
{"""score""": 0.004, """label""": """plane"""},
],
]
* 5 , )
| 401 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
        '''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa, GPT-2).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa, GPT-2).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature applied to the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
        '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is set, MLM is used instead of CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
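        # Frequency-based smoothing (as in XLM): raising the token counts to
        # -mlm_smoothing flattens the distribution so rarer tokens are masked
        # more often; special tokens are zeroed out below.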
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 49 | 0 |
from collections import defaultdict
from math import gcd
def __a ( A__ : int = 1500000 ):
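    # Project Euler 75: count perimeters <= limit that form exactly one
    # integer-sided right triangle. Euclid's formula (coprime m > n of opposite
    # parity) generates every primitive triple, with perimeter
    # (m^2 - n^2) + 2mn + (m^2 + n^2) = 2m(m + n); multiples of each primitive
    # perimeter account for the non-primitive triangles.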
SCREAMING_SNAKE_CASE = defaultdict(snake_case_ )
SCREAMING_SNAKE_CASE = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , snake_case_ , 2 ):
if gcd(snake_case_ , snake_case_ ) > 1:
continue
SCREAMING_SNAKE_CASE = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(snake_case_ , limit + 1 , snake_case_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'{solution() = }')
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
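# Lazy import structure: submodule contents are registered in this dict and only
# imported on first attribute access via the _LazyModule instance at the bottom.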
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
class _lowercase :
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[Any]=None ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = data
__SCREAMING_SNAKE_CASE : Optional[int] = previous
__SCREAMING_SNAKE_CASE : Any = next_node
def __str__( self :Optional[int] ) -> str:
return f'''{self.data}'''
def __magic_name__( self :Tuple ) -> Dict:
return self.data
def __magic_name__( self :Any ) -> Union[str, Any]:
return self.next
def __magic_name__( self :Any ) -> Optional[Any]:
return self.previous
class _lowercase :
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Any = head
def __iter__( self :str ) -> List[Any]:
return self
def __magic_name__( self :Optional[int] ) -> Tuple:
if not self.current:
raise StopIteration
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.current.get_data()
__SCREAMING_SNAKE_CASE : Tuple = self.current.get_next()
return value
class _lowercase :
'''simple docstring'''
def __init__( self :int ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[Any] = None # First node in list
__SCREAMING_SNAKE_CASE : Optional[int] = None # Last node in list
def __str__( self :List[str] ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.head
__SCREAMING_SNAKE_CASE : Optional[int] = []
while current is not None:
nodes.append(current.get_data() )
__SCREAMING_SNAKE_CASE : int = current.get_next()
return " ".join(str(_lowercase ) for node in nodes )
def __contains__( self :int , lowerCAmelCase__ :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.head
while current:
if current.get_data() == value:
return True
__SCREAMING_SNAKE_CASE : List[str] = current.get_next()
return False
def __iter__( self :Union[str, Any] ) -> str:
return LinkedListIterator(self.head )
def __magic_name__( self :Optional[Any] ) -> str:
if self.head:
return self.head.get_data()
return None
def __magic_name__( self :Union[str, Any] ) -> List[str]:
if self.tail:
return self.tail.get_data()
return None
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Node ) -> List[Any]:
if self.head is None:
__SCREAMING_SNAKE_CASE : Optional[int] = node
__SCREAMING_SNAKE_CASE : Dict = node
else:
self.insert_before_node(self.head , _lowercase )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Node ) -> Optional[int]:
if self.head is None:
self.set_head(_lowercase )
else:
self.insert_after_node(self.tail , _lowercase )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Tuple = Node(_lowercase )
if self.head is None:
self.set_head(_lowercase )
else:
self.set_tail(_lowercase )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Node , lowerCAmelCase__ :Node ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Any = node
__SCREAMING_SNAKE_CASE : Union[str, Any] = node.previous
if node.get_previous() is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = node_to_insert
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = node_to_insert
__SCREAMING_SNAKE_CASE : Optional[int] = node_to_insert
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Node , lowerCAmelCase__ :Node ) -> str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = node
__SCREAMING_SNAKE_CASE : Optional[int] = node.next
if node.get_next() is None:
__SCREAMING_SNAKE_CASE : int = node_to_insert
else:
__SCREAMING_SNAKE_CASE : List[str] = node_to_insert
__SCREAMING_SNAKE_CASE : Dict = node_to_insert
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> int:
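        # Positions are 1-indexed; if `position` is past the end of the list,
        # the new node is appended after the tail.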
__SCREAMING_SNAKE_CASE : List[str] = 1
__SCREAMING_SNAKE_CASE : Optional[Any] = Node(_lowercase )
__SCREAMING_SNAKE_CASE : str = self.head
while node:
if current_position == position:
self.insert_before_node(_lowercase , _lowercase )
return
current_position += 1
__SCREAMING_SNAKE_CASE : str = node.next
self.insert_after_node(self.tail , _lowercase )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :int ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
__SCREAMING_SNAKE_CASE : str = node.get_next()
raise Exception('''Node not found''' )
def __magic_name__( self :Any , lowerCAmelCase__ :str ) -> Union[str, Any]:
if (node := self.get_node(_lowercase )) is not None:
if node == self.head:
__SCREAMING_SNAKE_CASE : int = self.head.get_next()
if node == self.tail:
__SCREAMING_SNAKE_CASE : List[str] = self.tail.get_previous()
self.remove_node_pointers(_lowercase )
@staticmethod
def __magic_name__( lowerCAmelCase__ :Node ) -> List[Any]:
if node.get_next():
__SCREAMING_SNAKE_CASE : Any = node.previous
if node.get_previous():
__SCREAMING_SNAKE_CASE : Any = node.next
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : int = None
def __magic_name__( self :str ) -> Optional[Any]:
return self.head is None
def _UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 696 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
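    # `map_location` lambda (intended as `lambda storage, loc: storage`): keep the
    # deserialized tensors on CPU instead of restoring them to their saved device.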
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather
    # during the beam search (where it combines the softmax and the linear layer). Since we
    # already apply the softmax in our generation process, we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
    __UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __a :
'''simple docstring'''
def __init__( self ) -> Dict:
'''simple docstring'''
__lowercase = ""
__lowercase = ""
__lowercase = []
__lowercase = 0
__lowercase = 256
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
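        # Histogram equalization: accumulate the normalized histogram into a CDF
        # (sk) and remap each gray level to (L - 1) * CDF, spreading intensities
        # over the full [0, L - 1] range before writing the output image.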
__lowercase = cva.imread(_lowercase , 0 )
__lowercase = copy.deepcopy(self.img )
__lowercase , __lowercase , __lowercase = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
__lowercase = np.sum(_lowercase )
for i in range(len(_lowercase ) ):
__lowercase = x[i] / self.k
self.sk += prk
__lowercase = (self.L - 1) * self.sk
if self.rem != 0:
__lowercase = int(last % last )
__lowercase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_lowercase )
__lowercase = int(np.ma.count(self.img ) / self.img[1].size )
__lowercase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__lowercase = self.img[j][i]
if num != self.last_list[num]:
__lowercase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_lowercase = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
_lowercase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 118 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
def lowerCamelCase_ ( lowerCAmelCase: list )-> Optional[Any]:
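    # Optimal merge pattern: greedily merge the two smallest files (Huffman-style).
    # Each merge costs the sum of the two sizes, and the merged file goes back
    # into the pool until a single file remains.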
_snake_case : int = 0
while len(snake_case_ ) > 1:
_snake_case : Any = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
_snake_case : Dict = files.index(min(snake_case_ ) )
temp += files[min_index]
files.pop(snake_case_ )
files.append(snake_case_ )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 411 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
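    # Shared assertion helper: the JSON fixtures contain 4 rows with columns
    # col_1, col_2, col_3; the per-test `expected_features` dict gives the dtypes.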
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(snake_case_ ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
| 49 | 0 |
def lowerCamelCase( a__):
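    # In-place selection sort: for each position i, locate the minimum of the
    # unsorted suffix and swap it into place. O(n^2) comparisons, at most
    # n - 1 swaps.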
_SCREAMING_SNAKE_CASE =len(snake_case_)
for i in range(length - 1):
_SCREAMING_SNAKE_CASE =i
for k in range(i + 1 ,snake_case_):
if collection[k] < collection[least]:
_SCREAMING_SNAKE_CASE =k
if least != i:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(collection[i], collection[least])
return collection
if __name__ == "__main__":
snake_case_ : Dict = input('''Enter numbers separated by a comma:\n''').strip()
snake_case_ : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 691 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
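        # Run `generate` in a background thread so the main thread can consume
        # tokens from the iterator streamer as they are produced.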
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
| 49 | 0 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : Optional[int] = logging.get_logger(__name__)
class _lowerCAmelCase( _lowerCAmelCase ):
"""simple docstring"""
a : str =["pixel_values"]
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = 1 / 2_5_5 , _lowerCamelCase = True , _lowerCamelCase = IMAGENET_DEFAULT_MEAN , _lowerCamelCase = IMAGENET_DEFAULT_STD , **_lowerCamelCase , ):
super().__init__(**_lowercase )
UpperCamelCase_: List[str] = size if size is not None else {'shortest_edge': 2_2_4}
UpperCamelCase_: Union[str, Any] = get_size_dict(_lowercase , default_to_square=_lowercase )
UpperCamelCase_: Union[str, Any] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCamelCase_: int = get_size_dict(_lowercase , param_name='crop_size' )
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: Any = size
UpperCamelCase_: List[Any] = resample
UpperCamelCase_: Optional[Any] = do_center_crop
UpperCamelCase_: int = crop_size
UpperCamelCase_: str = do_rescale
UpperCamelCase_: Optional[Any] = rescale_factor
UpperCamelCase_: Any = do_normalize
UpperCamelCase_: str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase_: List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: List[Any] = get_size_dict(_lowercase , default_to_square=_lowercase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCamelCase_: Optional[Any] = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
UpperCamelCase_: Union[str, Any] = get_resize_output_image_size(_lowercase , size=_lowercase , default_to_square=_lowercase )
UpperCamelCase_: str = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
_lowercase , size=(size_dict['height'], size_dict['width']) , resample=_lowercase , data_format=_lowercase , **_lowercase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
UpperCamelCase_: str = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ):
UpperCamelCase_: Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_: Optional[int] = resample if resample is not None else self.resample
UpperCamelCase_: List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_: Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_: List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_: List[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: List[Any] = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_: int = image_std if image_std is not None else self.image_std
UpperCamelCase_: int = size if size is not None else self.size
UpperCamelCase_: Optional[int] = get_size_dict(_lowercase , default_to_square=_lowercase )
UpperCamelCase_: int = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_: List[str] = get_size_dict(_lowercase , param_name='crop_size' )
UpperCamelCase_: Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
UpperCamelCase_: Optional[int] = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
UpperCamelCase_: Tuple = [self.resize(_lowercase , _lowercase , _lowercase ) for image in images]
if do_center_crop:
UpperCamelCase_: Tuple = [self.center_crop(_lowercase , _lowercase ) for image in images]
if do_rescale:
UpperCamelCase_: Dict = [self.rescale(_lowercase , _lowercase ) for image in images]
if do_normalize:
UpperCamelCase_: int = [self.normalize(_lowercase , _lowercase , _lowercase ) for image in images]
UpperCamelCase_: Tuple = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
UpperCamelCase_: Dict = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 57 |
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float ):
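    # Newton-Laplace equation: the speed of sound in a fluid is
    # c = sqrt(K / rho), with bulk modulus K in Pa and density rho in kg/m^3;
    # the result is in m/s.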
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
def _UpperCAmelCase ( a : int = 5000_0000 ):
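    # Project Euler 87: count numbers below `limit` expressible as
    # p^2 + q^3 + r^4 with p, q, r prime. The smallest cube plus fourth power
    # is 2^3 + 2^4 = 24, so primes only need sieving up to sqrt(limit - 24).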
snake_case__ = set()
snake_case__ = int((limit - 24) ** (1 / 2) )
snake_case__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , snake_case_ ) ) )
for primea in primes:
snake_case__ = primea * primea
for primea in primes:
snake_case__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
snake_case__ = primea * primea * primea * primea
snake_case__ = square + cube + tetr
if total >= limit:
break
ret.add(snake_case_ )
return len(snake_case_ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 654 |
"""simple docstring"""
def lowercase__ ( snake_case_ :dict ):
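    # Directed-graph cycle detection via DFS: an edge leading back to a node
    # that is still on the recursion stack is a back edge, i.e. a cycle.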
__UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
__UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
for node in graph )
def lowercase__ ( snake_case_ :dict , snake_case_ :int , snake_case_ :set , snake_case_ :set ):
visited.add(snake_case_ )
rec_stk.add(snake_case_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(snake_case_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
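Usage of the corrected cycle check on two small directed graphs, given as adjacency-list dicts:

print(check_cycle({0: [1], 1: [0]}))         # True: 0 -> 1 -> 0 is a back edge
print(check_cycle({0: [1], 1: [2], 2: []}))  # False: the graph is acyclic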
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively sorts the first n elements of the collection in place."""
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Swaps adjacent out-of-order pairs rightward from index until ordered."""
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 519 |
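Usage of the corrected sort (it mutates the list in place):

data = [5, 3, 1, 4, 2]
rec_insertion_sort(data, len(data))
print(data)  # [1, 2, 3, 4, 5]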
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 49 | 0 |
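The module above uses the library's lazy-import pattern: submodule imports are deferred until a name is first accessed, so importing the package stays cheap. A stripped-down sketch of the same idea (a simplified stand-in, not the real _LazyModule, which also handles TYPE_CHECKING and module specs):

import importlib
import types


class LazyModule(types.ModuleType):
    # Defer submodule imports until an exported name is first accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(module, attr)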
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 259 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 49 | 0 |
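Beyond the sample graph above (where vertices 2, 3 and 5 are articulation points), a quick check of the corrected compute_ap on a 3-node path, where only the middle vertex disconnects the graph:

compute_ap({0: [1], 1: [0, 2], 2: [1]})  # prints 1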
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 125 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 49 | 0 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 401 |
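In this spiral-primes problem j is the current side length of the number spiral: range(j*j + j + 1, (j + 2)*(j + 2), j + 1) enumerates the three new non-square corner values of the next ring (the fourth corner, (j + 2)^2, is never prime), and the loop stops once the fraction of primes among all diagonal values drops below ratio. A check of the corrected code:

# The diagonal prime ratio first falls below 50% at side length 11.
assert solution(0.5) == 11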
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
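The DP above decides whether string a can be abbreviated to b by capitalizing some of its lowercase letters and deleting the remaining lowercase ones; dp[i][j] records that the first i characters of a can produce the first j characters of b. The classic example:

assert abbr("daBcd", "ABC") is True   # drop the d's, capitalize 'a' and 'c'
assert abbr("dBcd", "ABC") is False   # no 'a' available to produce 'A'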
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 16 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
| 49 | 0 |
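A smaller illustration of the corrected scheduler above: with one round-robin queue (time slice 10) followed by FCFS, a 5-unit job finishes during the round-robin pass while a 20-unit job is completed by FCFS. The process names below are demo values, not part of the source:

P_a = Process("A", 0, 20)
P_b = Process("B", 0, 5)
demo = MLFQ(number_of_queues=2, time_slices=[10], queue=deque([P_a, P_b]), current_time=0)
demo.multi_level_feedback_queue()
print(demo.calculate_sequence_of_finish_queue())  # ['B', 'A']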
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 696 |
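Here sum_of_divisors(n) returns the sum of proper divisors of n, and solution sums the amicable numbers below the limit (Project Euler 21). A check with the classic pair:

# 220 and 284 form the classic amicable pair: d(220) = 284, d(284) = 220.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
# Project Euler 21: the sum of all amicable numbers below 10000 is 31626.
assert solution(10000) == 31626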
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 49 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 118 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 49 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for a, b in zip(_lowercase , _lowercase ):
self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_lowercase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : int = None
ops.enable_eager_execution_internal()
_snake_case : Any = tf.config.list_physical_devices('CPU' )
if len(_lowercase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_snake_case : List[Any] = tf.config.list_logical_devices(device_type='CPU' )
_snake_case : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_snake_case : List[str] = GradientAccumulator()
_snake_case : Union[str, Any] = tf.Variable([4.0, 3.0] )
_snake_case , _snake_case : List[str] = create_optimizer(5e-5 , 10 , 5 )
_snake_case : List[str] = tf.Variable([0.0, 0.0] , trainable=_lowercase )
def accumulate_on_replica(UpperCamelCase : Optional[int] ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(UpperCamelCase : Dict , UpperCamelCase : Tuple ):
with strategy.scope():
_snake_case : Dict = strategy.experimental_local_results(_lowercase )
local_variables[0].assign(_lowercase )
local_variables[1].assign(_lowercase )
strategy.run(_lowercase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_lowercase )
def _check_local_values(UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] ):
_snake_case : List[str] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _lowercase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , _lowercase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 411 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 691 |
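The return expression above is the standard annuity formula EMI = P * r * (1 + r)^n / ((1 + r)^n - 1), with monthly rate r and n monthly payments. A worked check of the corrected function:

# 25,000 borrowed at 12% per annum over 3 years:
# r = 0.12 / 12 = 0.01, n = 36, so the EMI is about 830.36 per month.
print(round(equated_monthly_installments(25000, 0.12, 3), 2))  # 830.36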
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
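A usage sketch of the corrected viterbi function with the classic healthy/fever hidden Markov model (the probabilities below are the textbook example values, not from the source):

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
print(viterbi(observations, states, start_p, trans_p, emit_p))
# ['Healthy', 'Healthy', 'Fever']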
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase( _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
a : int =LEDTokenizer
a : Optional[Any] =LEDTokenizerFast
a : List[Any] =True
def _a ( self ):
super().setUp()
UpperCamelCase_: Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCamelCase_: Tuple = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
UpperCamelCase_: str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCamelCase_: int = {'unk_token': '<unk>'}
UpperCamelCase_: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowercase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowercase ) )
def _a ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def _a ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def _a ( self , _lowerCamelCase ):
return "lower newer", "lower newer"
@cached_property
def _a ( self ):
return LEDTokenizer.from_pretrained('allenai/led-base-16384' )
@cached_property
def _a ( self ):
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
@require_torch
def _a ( self ):
UpperCamelCase_: Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
UpperCamelCase_: List[Any] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: List[str] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors='pt' )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(_lowercase , _lowercase )
@require_torch
def _a ( self ):
UpperCamelCase_: List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: List[str] = tokenizer(_lowercase , padding=_lowercase , return_tensors='pt' )
self.assertIn('input_ids' , _lowercase )
self.assertIn('attention_mask' , _lowercase )
self.assertNotIn('labels' , _lowercase )
self.assertNotIn('decoder_attention_mask' , _lowercase )
@require_torch
def _a ( self ):
UpperCamelCase_: int = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Optional[Any] = tokenizer(text_target=_lowercase , max_length=3_2 , padding='max_length' , return_tensors='pt' )
self.assertEqual(3_2 , targets['input_ids'].shape[1] )
@require_torch
def _a ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Any = tokenizer(
['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=_lowercase , truncation=_lowercase , return_tensors='pt' )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def _a ( self ):
UpperCamelCase_: int = ['A long paragraph for summarization.']
UpperCamelCase_: List[str] = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Optional[int] = tokenizer(_lowercase , return_tensors='pt' )
UpperCamelCase_: List[Any] = tokenizer(text_target=_lowercase , return_tensors='pt' )
UpperCamelCase_: Optional[Any] = inputs['input_ids']
UpperCamelCase_: Tuple = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _a ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Tuple = ['Summary of the text.', 'Another summary.']
UpperCamelCase_: int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
UpperCamelCase_: Tuple = tokenizer(_lowercase , padding=_lowercase )
UpperCamelCase_: str = [[0] * len(_lowercase ) for x in encoded_output['input_ids']]
UpperCamelCase_: Union[str, Any] = tokenizer.pad(_lowercase )
self.assertSequenceEqual(outputs['global_attention_mask'] , _lowercase )
def _a ( self ):
pass
def _a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: Tuple = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
UpperCamelCase_: Any = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
UpperCamelCase_: Any = 'A, <mask> AllenNLP sentence.'
UpperCamelCase_: Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
UpperCamelCase_: List[str] = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCamelCase_: Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCamelCase_: Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 57 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 654 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
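# --- Editorial addition: what `IndexFlatIP.search` above computes ------------
# A flat inner-product index scores the query against every stored vector, so a
# brute-force numpy version gives the same top-k ordering (illustration only,
# not used by the app).
def _demo_inner_product_topk(query_rep, passage_reps, k=10):
    scores = query_rep @ passage_reps.T          # plain inner products, shape (1, N)
    top = np.argsort(-scores[0])[:k]             # indices of the k best passages
    return top, scores[0][top]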
def make_support(question, source='wiki40b', method='dense', n_results=10):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name='english_wiki40b_snippets_100w',
                n_results=n_results,
            )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device='cuda:0',
        )[0]
    # `support_list` is the module-level variable set in the UI flow below
    return (answer, support_list)
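# --- Editorial addition (not part of the original app): with the helpers above,
# the full question -> retrieve -> generate loop outside of Streamlit looks
# roughly like this sketch. It is guarded so importing the file never runs it,
# since it needs a GPU and the downloaded indexes.
if False:  # illustrative only
    q = 'How do people make chocolate?'
    doc, support_list = make_support(q, source='wiki40b', method='dense', n_results=10)
    ans, _ = answer_question(doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256)
    print(ans)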
st.title('Long Form Question Answering with ELI5')

# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class="img-container"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    'Answer the question',
    'View the retrieved document only',
    'View the most similar ELI5 question and answer',
    'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'

sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder\'s output probabilities.\n    '
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
    'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == 'sampled'),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
        for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
                    ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
                )
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
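
# --- Editorial addition: a minimal stand-alone sketch of the lazy-import trick
# `_LazyModule` relies on (PEP 562 module-level __getattr__); a name resolves to
# a real import only on first attribute access. Hypothetical helper, stdlib only.
import importlib
import types

def _make_lazy_module(name, attr_to_module):
    mod = types.ModuleType(name)
    def __getattr__(attr):
        target = attr_to_module.get(attr)
        if target is None:
            raise AttributeError(f'module {name!r} has no attribute {attr!r}')
        return getattr(importlib.import_module(target), attr)
    mod.__getattr__ = __getattr__   # consulted when normal attribute lookup fails
    return mod

# _make_lazy_module('lazy_math', {'sqrt': 'math'}).sqrt(9.0) -> 3.0 (imports math lazily)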
| 519 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'An astronaut riding an elephant',
            'source_prompt': 'An astronaut riding a horse',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'eta': 0.1,
            'strength': 0.8,
            'guidance_scale': 3,
            'source_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_cycle( self ):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_cycle_fp16( self ):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module , 'half' ):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    @skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()

    @unittest.skip('non-deterministic pipeline' )
    def test_inference_batch_single_identical( self ):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
        init_image = init_image.resize((512, 512) )
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , safety_checker=None , torch_dtype=torch.float16 , revision='fp16' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'A black colored car'
        source_prompt = 'A blue colored car'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='np' , )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1
    def test_cycle_diffusion_pipeline( self ):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
        init_image = init_image.resize((512, 512) )
        model_id = 'CompVis/stable-diffusion-v1-4'
        scheduler = DDIMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = CycleDiffusionPipeline.from_pretrained(model_id , scheduler=scheduler , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'A black colored car'
        source_prompt = 'A blue colored car'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type='np' , )
        image = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
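
# --- Editorial addition: the deterministic DDIM update CycleDiffusion builds on.
# The source image is noised under the *source* prompt and denoised under the
# *target* prompt; each eta=0 DDIM step has the closed form below (a sketch,
# independent of the diffusers internals; `alpha_*` are cumulative alphas).
def _ddim_step(x_t, eps, alpha_t, alpha_prev):
    x0_pred = (x_t - (1 - alpha_t) ** 0.5 * eps) / alpha_t ** 0.5   # predicted clean latent
    return alpha_prev ** 0.5 * x0_pred + (1 - alpha_prev) ** 0.5 * eps

# e.g. _ddim_step(torch.randn(1, 4, 8, 8), torch.randn(1, 4, 8, 8),
#                 torch.tensor(0.9), torch.tensor(0.95))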
| 49 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp( self ):
        '''simple docstring'''
        super().setUp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts( self ,tokenizer ):
        '''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[9, 6, 7, 12, 10, 11] )
def __lowerCamelCase ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Optional[int] = 'UNwant\u00E9d,running'
_lowerCAmelCase : List[Any] = tokenizer.tokenize(_lowercase )
_lowerCAmelCase : List[Any] = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
_lowerCAmelCase : List[str] = rust_tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = tokenizer.encode(_lowercase )
_lowerCAmelCase : Dict = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
# With lower casing
_lowerCAmelCase : Optional[int] = self.get_tokenizer(do_lower_case=_lowercase )
_lowerCAmelCase : Any = self.get_rust_tokenizer(do_lower_case=_lowercase )
_lowerCAmelCase : Tuple = 'UNwant\u00E9d,running'
_lowerCAmelCase : Any = tokenizer.tokenize(_lowercase )
_lowerCAmelCase : List[Any] = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
_lowerCAmelCase : List[str] = tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
_lowerCAmelCase : Any = rust_tokenizer.encode(_lowercase ,add_special_tokens=_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(_lowercase )
_lowerCAmelCase : List[str] = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = BasicTokenizer(do_lower_case=_lowercase ,strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = BasicTokenizer(do_lower_case=_lowercase ,strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : str = BasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=_lowercase ,strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = BasicTokenizer(do_lower_case=_lowercase ,strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Dict = BasicTokenizer(do_lower_case=_lowercase ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_lowerCAmelCase : Tuple = {}
for i, token in enumerate(_lowercase ):
_lowerCAmelCase : Union[str, Any] = i
_lowerCAmelCase : int = WordpieceTokenizer(vocab=_lowercase ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : int = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_lowercase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_lowercase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
_lowerCAmelCase : Optional[int] = tokenizer.encode('sequence builders' ,add_special_tokens=_lowercase )
_lowerCAmelCase : int = tokenizer.encode('multi-sequence build' ,add_special_tokens=_lowercase )
_lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(_lowercase )
_lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(_lowercase ,_lowercase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __lowerCamelCase ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowercase ,**_lowercase )
_lowerCAmelCase : Any = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_lowerCAmelCase : Optional[Any] = tokenizer_r.encode_plus(
_lowercase ,return_attention_mask=_lowercase ,return_token_type_ids=_lowercase ,return_offsets_mapping=_lowercase ,add_special_tokens=_lowercase ,)
_lowerCAmelCase : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_lowercase ,'do_lower_case' ) else False
_lowerCAmelCase : Union[str, Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def __lowerCamelCase ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ['的', '人', '有']
_lowerCAmelCase : List[str] = ''.join(_lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Dict = self.tokenizer_class.from_pretrained(_lowercase ,**_lowercase )
_lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(_lowercase ,**_lowercase )
_lowerCAmelCase : int = tokenizer_p.encode(_lowercase ,add_special_tokens=_lowercase )
_lowerCAmelCase : str = tokenizer_r.encode(_lowercase ,add_special_tokens=_lowercase )
_lowerCAmelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(_lowercase )
_lowerCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowercase ,_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
_lowerCAmelCase : str = False
_lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(_lowercase ,**_lowercase )
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained(_lowercase ,**_lowercase )
_lowerCAmelCase : int = tokenizer_r.encode(_lowercase ,add_special_tokens=_lowercase )
_lowerCAmelCase : Optional[int] = tokenizer_p.encode(_lowercase ,add_special_tokens=_lowercase )
_lowerCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(_lowercase )
_lowerCAmelCase : str = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase : Dict = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_lowercase )
]
self.assertListEqual(_lowercase ,_lowercase )
self.assertListEqual(_lowercase ,_lowercase )
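
# --- Editorial addition: the greedy longest-match-first algorithm that
# WordpieceTokenizer (exercised above) implements, as a tiny reference function.
def _demo_wordpiece(word, vocab, unk='[UNK]'):
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = ('##' if start > 0 else '') + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]            # no prefix matched: the whole word is unknown
        tokens.append(cur)
        start = end
    return tokens

# _demo_wordpiece('unwanted', {'un', '##want', '##ed'}) -> ['un', '##want', '##ed']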
| 259 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class RemBertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return len(self.sp_model )

    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self , text , sample=False ):
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces

    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
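
# --- Editorial addition: the sequence layouts the helpers above agree on,
# written out with illustrative ids (101=[CLS], 102=[SEP]).
cls_id, sep_id = [101], [102]
seq_a, seq_b = [7, 8, 9], [11, 12]
assert cls_id + seq_a + sep_id == [101, 7, 8, 9, 102]                          # [CLS] A [SEP]
assert cls_id + seq_a + sep_id + seq_b + sep_id == [101, 7, 8, 9, 102, 11, 12, 102]
segment_ids = len(cls_id + seq_a + sep_id) * [0] + len(seq_b + sep_id) * [1]   # token_type_ids
assert segment_ids == [0, 0, 0, 0, 0, 1, 1, 1]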
| 49 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest( unittest.TestCase ):
    '''simple docstring'''

    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def _lowerCamelCase ( self : str ) -> Any:
_lowercase : Dict = None
_lowercase : List[Any] = 20
_lowercase : List[str] = self._get_uniform_logits(batch_size=2 ,length=_lowercase )
# tweak scores to not be uniform anymore
_lowercase : Tuple = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_lowercase : List[str] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_lowercase : Optional[Any] = jax.nn.softmax(_lowercase ,axis=-1 )
_lowercase : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowercase : int = FlaxTemperatureLogitsWarper(temperature=1.3 )
_lowercase : Any = jax.nn.softmax(temp_dist_warper_sharper(_lowercase ,scores.copy() ,cur_len=_lowercase ) ,axis=-1 )
_lowercase : Dict = jax.nn.softmax(temp_dist_warper_smoother(_lowercase ,scores.copy() ,cur_len=_lowercase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def _lowerCamelCase ( self : List[Any] ) -> List[str]:
_lowercase : List[str] = None
_lowercase : str = 10
_lowercase : List[str] = 2
# create ramp distribution
_lowercase : int = np.broadcast_to(np.arange(_lowercase )[None, :] ,(batch_size, vocab_size) ).copy()
_lowercase : List[Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_lowercase : List[str] = FlaxTopKLogitsWarper(3 )
_lowercase : Optional[int] = top_k_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_lowercase : List[Any] = 5
_lowercase : List[Any] = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
_lowercase : Optional[Any] = np.broadcast_to(np.arange(_lowercase )[None, :] ,(batch_size, length) ).copy()
_lowercase : Any = top_k_warp_safety_check(_lowercase ,_lowercase ,cur_len=_lowercase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def _lowerCamelCase ( self : str ) -> Optional[Any]:
_lowercase : Optional[int] = None
_lowercase : List[Any] = 10
_lowercase : Any = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_lowercase : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
_lowercase : Dict = FlaxTopPLogitsWarper(0.8 )
_lowercase : Union[str, Any] = np.exp(top_p_warp(_lowercase ,_lowercase ,cur_len=_lowercase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_lowercase : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(_lowercase ,_lowercase ,atol=1e-3 ) )
# check edge cases with negative and extreme logits
_lowercase : Tuple = np.broadcast_to(np.arange(_lowercase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_lowercase : Tuple = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
_lowercase : str = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
_lowercase : Tuple = top_p_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def _lowerCamelCase ( self : List[str] ) -> Any:
_lowercase : Optional[int] = 20
_lowercase : Union[str, Any] = 4
_lowercase : Dict = 0
_lowercase : int = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowercase )
# check that min length is applied at length 5
_lowercase : Dict = ids_tensor((batch_size, 20) ,vocab_size=20 )
_lowercase : List[Any] = 5
_lowercase : Tuple = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : List[str] = min_dist_processor(_lowercase ,_lowercase ,cur_len=_lowercase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float('inf' )] )
# check that min length is not applied anymore at length 15
_lowercase : Dict = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : Union[str, Any] = 15
_lowercase : Dict = min_dist_processor(_lowercase ,_lowercase ,cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def _lowerCamelCase ( self : List[Any] ) -> List[str]:
_lowercase : Union[str, Any] = 20
_lowercase : Optional[int] = 4
_lowercase : List[str] = 0
_lowercase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
# check that all scores are -inf except the bos_token_id score
_lowercase : Dict = ids_tensor((batch_size, 1) ,vocab_size=20 )
_lowercase : Dict = 1
_lowercase : Optional[Any] = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : str = logits_processor(_lowercase ,_lowercase ,cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_lowercase : List[Any] = 3
_lowercase : List[Any] = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : str = logits_processor(_lowercase ,_lowercase ,cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def _lowerCamelCase ( self : Optional[int] ) -> Dict:
_lowercase : List[str] = 20
_lowercase : Any = 4
_lowercase : str = 0
_lowercase : List[str] = 5
_lowercase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase ,eos_token_id=_lowercase )
# check that all scores are -inf except the eos_token_id when max_length is reached
_lowercase : Optional[Any] = ids_tensor((batch_size, 4) ,vocab_size=20 )
_lowercase : List[Any] = 4
_lowercase : Union[str, Any] = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : Union[str, Any] = logits_processor(_lowercase ,_lowercase ,cur_len=_lowercase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_lowercase : int = 3
_lowercase : Tuple = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : Dict = logits_processor(_lowercase ,_lowercase ,cur_len=_lowercase )
self.assertFalse(jnp.isinf(_lowercase ).any() )
def _lowerCamelCase ( self : Any ) -> List[str]:
_lowercase : Union[str, Any] = 4
_lowercase : int = 10
_lowercase : List[str] = 15
_lowercase : int = 2
_lowercase : List[Any] = 1
_lowercase : Optional[Any] = 15
# dummy input_ids and scores
_lowercase : str = ids_tensor((batch_size, sequence_length) ,_lowercase )
_lowercase : int = input_ids.copy()
_lowercase : List[Any] = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : str = scores.copy()
# instantiate all dist processors
_lowercase : List[str] = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowercase : List[str] = FlaxTopKLogitsWarper(3 )
_lowercase : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowercase : int = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowercase )
_lowercase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
_lowercase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase ,eos_token_id=_lowercase )
_lowercase : List[Any] = 10
# no processor list
_lowercase : Optional[int] = temp_dist_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : List[Any] = top_k_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : Union[str, Any] = top_p_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : Tuple = min_dist_proc(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : Optional[Any] = bos_dist_proc(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : Optional[Any] = eos_dist_proc(_lowercase ,_lowercase ,cur_len=_lowercase )
# with processor list
_lowercase : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowercase : Any = processor(_lowercase ,_lowercase ,cur_len=_lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase ,_lowercase ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def _lowerCamelCase ( self : int ) -> List[Any]:
_lowercase : List[Any] = 4
_lowercase : Optional[int] = 10
_lowercase : Optional[Any] = 15
_lowercase : Optional[Any] = 2
_lowercase : List[str] = 1
_lowercase : int = 15
# dummy input_ids and scores
_lowercase : str = ids_tensor((batch_size, sequence_length) ,_lowercase )
_lowercase : str = input_ids.copy()
_lowercase : List[Any] = self._get_uniform_logits(_lowercase ,_lowercase )
_lowercase : Optional[int] = scores.copy()
# instantiate all dist processors
_lowercase : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
_lowercase : Any = FlaxTopKLogitsWarper(3 )
_lowercase : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_lowercase : str = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowercase )
_lowercase : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowercase )
_lowercase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowercase ,eos_token_id=_lowercase )
_lowercase : List[str] = 10
# no processor list
def run_no_processor_list(UpperCamelCase : List[str] ,UpperCamelCase : Optional[Any] ,UpperCamelCase : List[Any] ):
_lowercase : int = temp_dist_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : List[Any] = top_k_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : Tuple = top_p_warp(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : Dict = min_dist_proc(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : Dict = bos_dist_proc(_lowercase ,_lowercase ,cur_len=_lowercase )
_lowercase : List[Any] = eos_dist_proc(_lowercase ,_lowercase ,cur_len=_lowercase )
return scores
# with processor list
def run_processor_list(UpperCamelCase : Union[str, Any] ,UpperCamelCase : Tuple ,UpperCamelCase : int ):
_lowercase : Union[str, Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_lowercase : List[Any] = processor(_lowercase ,_lowercase ,cur_len=_lowercase )
return scores
_lowercase : List[str] = jax.jit(_lowercase )
_lowercase : Optional[int] = jax.jit(_lowercase )
_lowercase : Any = jitted_run_no_processor_list(_lowercase ,_lowercase ,_lowercase )
_lowercase : Optional[Any] = jitted_run_processor_list(_lowercase ,_lowercase ,_lowercase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowercase ,_lowercase ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
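
# --- Editorial addition: a numpy reference for the top-p ("nucleus") filtering
# that the FlaxTopPLogitsWarper test above checks; kept tokens are the smallest
# set whose cumulative probability first reaches p (the warper masks the rest
# with -inf on logits instead of zeroing probabilities).
def _demo_top_p_filter(probs, p=0.8):
    order = np.argsort(-probs)
    csum = np.cumsum(probs[order])
    keep = csum - probs[order] < p          # include tokens until mass p is reached
    out = np.zeros_like(probs)
    out[order[keep]] = probs[order[keep]]
    return out

# _demo_top_p_filter(np.array([0.3, 0.1, 0.1, 0.5])) -> array([0.3, 0. , 0. , 0.5])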
| 125 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a :
"""simple docstring"""
pass
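
# --- Editorial addition: a hypothetical sketch of what a decorator like
# `require_onnxruntime` typically does (the real transformers helper differs in
# details): skip the test class or function when the optional package is missing.
import importlib.util
import unittest

def _require_pkg(pkg):
    def deco(obj):
        if importlib.util.find_spec(pkg) is None:
            return unittest.skip(f'test requires {pkg}')(obj)
        return obj
    return deco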
| 401 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
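
# --- Editorial addition: how the alpha_* weights validated above combine the
# per-objective losses into one scalar during distillation (dummy numbers only).
def _demo_total_loss(alpha, losses):
    return sum(alpha[k] * losses[k] for k in alpha)

# _demo_total_loss({'ce': 0.5, 'clm': 0.5}, {'ce': 2.1, 'clm': 3.4}) -> 2.75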
def freeze_pos_embeddings(student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student , args ):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
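        # Frequency smoothing: raising counts to the power -mlm_smoothing flattens the
        # distribution so rare tokens are masked more often (word2vec-style, as in XLM).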
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
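# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of this script): how `--temperature`, `--alpha_ce` and
# `--alpha_clm` above are typically combined into a distillation objective. The tensor
# names below are hypothetical; the real loss lives in the Distiller class.
import torch.nn.functional as F

def distillation_loss_sketch(student_logits, teacher_logits, labels, temperature=2.0, alpha_ce=0.5, alpha_clm=0.5):
    # Soft-target term: KL between temperature-softened distributions, scaled by T**2 so
    # gradient magnitudes stay comparable across temperatures.
    loss_ce = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature**2)
    # Hard-target term: standard cross-entropy against the gold labels (CLM here).
    loss_clm = F.cross_entropy(student_logits.view(-1, student_logits.size(-1)), labels.view(-1))
    return alpha_ce * loss_ce + alpha_clm * loss_clm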
| 49 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __a ( A__ : Tuple ):
SCREAMING_SNAKE_CASE = args.pruning_method
SCREAMING_SNAKE_CASE = args.threshold
SCREAMING_SNAKE_CASE = args.model_name_or_path.rstrip("/" )
SCREAMING_SNAKE_CASE = args.target_model_path
print(F"Load fine-pruned model from {model_name_or_path}" )
SCREAMING_SNAKE_CASE = torch.load(os.path.join(snake_case_ , "pytorch_model.bin" ) )
SCREAMING_SNAKE_CASE = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
SCREAMING_SNAKE_CASE = tensor
print(F"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
SCREAMING_SNAKE_CASE = tensor
print(F"Copied layer {name}" )
elif "bias" in name:
SCREAMING_SNAKE_CASE = tensor
print(F"Copied layer {name}" )
else:
if pruning_method == "magnitude":
SCREAMING_SNAKE_CASE = MagnitudeBinarizer.apply(inputs=snake_case_ , threshold=snake_case_ )
SCREAMING_SNAKE_CASE = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[F"{prefix_}mask_scores"]
SCREAMING_SNAKE_CASE = TopKBinarizer.apply(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[F"{prefix_}mask_scores"]
SCREAMING_SNAKE_CASE = ThresholdBinarizer.apply(snake_case_ , snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE = tensor * mask
print(F"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
SCREAMING_SNAKE_CASE = name[:-6]
SCREAMING_SNAKE_CASE = model[F"{prefix_}mask_scores"]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -0.1, 1.1
SCREAMING_SNAKE_CASE = torch.sigmoid(snake_case_ )
SCREAMING_SNAKE_CASE = s * (r - l) + l
SCREAMING_SNAKE_CASE = s_bar.clamp(min=0.0 , max=1.0 )
SCREAMING_SNAKE_CASE = tensor * mask
print(F"Pruned layer {name}" )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
SCREAMING_SNAKE_CASE = os.path.join(
os.path.dirname(snake_case_ ) , F"bertarized_{os.path.basename(snake_case_ )}" )
if not os.path.isdir(snake_case_ ):
shutil.copytree(snake_case_ , snake_case_ )
print(F"\nCreated folder {target_model_path}" )
torch.save(snake_case_ , os.path.join(snake_case_ , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
            'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '
            'For `sigmoied_threshold`, it is the threshold tau against which the (sigmoied) scores are compared. '
            'Not needed for `l0`.'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
        help='Folder where the pruned model will be saved (defaults to a `bertarized_<model_name>` folder next to the original)',
)
__A : str = parser.parse_args()
main(args)
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
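# Usage sketch for the lazy-import pattern above (illustrative; assumes the optional deps
# are installed): nothing heavy is imported until an attribute is first accessed, at which
# point `_LazyModule` resolves it from `_import_structure`.
#
#     from transformers.models.fnet import FNetConfig  # cheap, no torch needed
#     from transformers.models.fnet import FNetModel   # triggers the torch-gated import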
| 49 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _lowercase ( _lowerCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : torch.FloatTensor
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :str , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Dict=("DownEncoderBlock2D",) , lowerCAmelCase__ :Dict=(64,) , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :List[str]="silu" , lowerCAmelCase__ :Optional[int]=True , ) -> Any:
super().__init__()
__SCREAMING_SNAKE_CASE : List[str] = layers_per_block
__SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.Convad(
_lowercase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : int = nn.ModuleList([] )
# down
__SCREAMING_SNAKE_CASE : Any = block_out_channels[0]
for i, down_block_type in enumerate(_lowercase ):
__SCREAMING_SNAKE_CASE : List[Any] = output_channel
__SCREAMING_SNAKE_CASE : Union[str, Any] = block_out_channels[i]
__SCREAMING_SNAKE_CASE : Any = i == len(_lowercase ) - 1
__SCREAMING_SNAKE_CASE : Dict = get_down_block(
_lowercase , num_layers=self.layers_per_block , in_channels=_lowercase , out_channels=_lowercase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , )
self.down_blocks.append(_lowercase )
# mid
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , )
# out
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_lowercase , eps=1E-6 )
__SCREAMING_SNAKE_CASE : Optional[int] = nn.SiLU()
__SCREAMING_SNAKE_CASE : List[Any] = 2 * out_channels if double_z else out_channels
__SCREAMING_SNAKE_CASE : Dict = nn.Convad(block_out_channels[-1] , _lowercase , 3 , padding=1 )
__SCREAMING_SNAKE_CASE : int = False
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = x
__SCREAMING_SNAKE_CASE : Any = self.conv_in(_lowercase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase__ :Dict ):
def custom_forward(*lowerCAmelCase__ :Any ):
return module(*_lowercase )
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
__SCREAMING_SNAKE_CASE : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowercase ) , _lowercase , use_reentrant=_lowercase )
# middle
__SCREAMING_SNAKE_CASE : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowercase , use_reentrant=_lowercase )
else:
for down_block in self.down_blocks:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase ) , _lowercase )
# middle
__SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _lowercase )
else:
# down
for down_block in self.down_blocks:
__SCREAMING_SNAKE_CASE : Dict = down_block(_lowercase )
# middle
__SCREAMING_SNAKE_CASE : Optional[Any] = self.mid_block(_lowercase )
# post-process
__SCREAMING_SNAKE_CASE : List[str] = self.conv_norm_out(_lowercase )
__SCREAMING_SNAKE_CASE : int = self.conv_act(_lowercase )
__SCREAMING_SNAKE_CASE : List[str] = self.conv_out(_lowercase )
return sample
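# Note on the gradient-checkpointing pattern above: `torch.utils.checkpoint.checkpoint`
# expects a plain callable, so `create_custom_forward` wraps each block. Activations inside
# the wrapped block are recomputed during backward instead of stored, trading compute for memory.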
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :Dict , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :List[str]=("UpDecoderBlock2D",) , lowerCAmelCase__ :List[str]=(64,) , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Optional[Any]="silu" , lowerCAmelCase__ :List[Any]="group" , ) -> int:
super().__init__()
__SCREAMING_SNAKE_CASE : Dict = layers_per_block
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Convad(
_lowercase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
__SCREAMING_SNAKE_CASE : Any = in_channels if norm_type == '''spatial''' else None
# mid
__SCREAMING_SNAKE_CASE : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , )
# up
__SCREAMING_SNAKE_CASE : List[Any] = list(reversed(_lowercase ) )
__SCREAMING_SNAKE_CASE : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_lowercase ):
__SCREAMING_SNAKE_CASE : List[Any] = output_channel
__SCREAMING_SNAKE_CASE : Optional[int] = reversed_block_out_channels[i]
__SCREAMING_SNAKE_CASE : Dict = i == len(_lowercase ) - 1
__SCREAMING_SNAKE_CASE : Tuple = get_up_block(
_lowercase , num_layers=self.layers_per_block + 1 , in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , resnet_time_scale_shift=_lowercase , )
self.up_blocks.append(_lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = output_channel
# out
if norm_type == "spatial":
__SCREAMING_SNAKE_CASE : Dict = SpatialNorm(block_out_channels[0] , _lowercase )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_lowercase , eps=1E-6 )
__SCREAMING_SNAKE_CASE : Tuple = nn.SiLU()
__SCREAMING_SNAKE_CASE : Dict = nn.Convad(block_out_channels[0] , _lowercase , 3 , padding=1 )
__SCREAMING_SNAKE_CASE : Tuple = False
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any]=None ) -> int:
__SCREAMING_SNAKE_CASE : int = z
__SCREAMING_SNAKE_CASE : str = self.conv_in(_lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase__ :Optional[Any] ):
def custom_forward(*lowerCAmelCase__ :Optional[int] ):
return module(*_lowercase )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
__SCREAMING_SNAKE_CASE : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowercase , _lowercase , use_reentrant=_lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = sample.to(_lowercase )
# up
for up_block in self.up_blocks:
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowercase ) , _lowercase , _lowercase , use_reentrant=_lowercase )
else:
# middle
__SCREAMING_SNAKE_CASE : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowercase , _lowercase )
__SCREAMING_SNAKE_CASE : int = sample.to(_lowercase )
# up
for up_block in self.up_blocks:
__SCREAMING_SNAKE_CASE : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase ) , _lowercase , _lowercase )
else:
# middle
__SCREAMING_SNAKE_CASE : List[str] = self.mid_block(_lowercase , _lowercase )
__SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowercase )
# up
for up_block in self.up_blocks:
__SCREAMING_SNAKE_CASE : Union[str, Any] = up_block(_lowercase , _lowercase )
# post-process
if latent_embeds is None:
__SCREAMING_SNAKE_CASE : List[str] = self.conv_norm_out(_lowercase )
else:
__SCREAMING_SNAKE_CASE : Dict = self.conv_norm_out(_lowercase , _lowercase )
__SCREAMING_SNAKE_CASE : Tuple = self.conv_act(_lowercase )
__SCREAMING_SNAKE_CASE : Dict = self.conv_out(_lowercase )
return sample
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Optional[Any]="random" , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Optional[Any]=True ) -> Dict:
super().__init__()
__SCREAMING_SNAKE_CASE : List[Any] = n_e
__SCREAMING_SNAKE_CASE : Dict = vq_embed_dim
__SCREAMING_SNAKE_CASE : Tuple = beta
__SCREAMING_SNAKE_CASE : List[Any] = legacy
__SCREAMING_SNAKE_CASE : Optional[Any] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__SCREAMING_SNAKE_CASE : Optional[int] = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
__SCREAMING_SNAKE_CASE : List[Any] = self.used.shape[0]
__SCREAMING_SNAKE_CASE : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__SCREAMING_SNAKE_CASE : Dict = self.re_embed
__SCREAMING_SNAKE_CASE : int = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = n_e
__SCREAMING_SNAKE_CASE : Tuple = sane_index_shape
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(_lowercase ) > 1
__SCREAMING_SNAKE_CASE : Optional[int] = inds.reshape(ishape[0] , -1 )
__SCREAMING_SNAKE_CASE : int = self.used.to(_lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
__SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
__SCREAMING_SNAKE_CASE : str = match.sum(2 ) < 1
if self.unknown_index == "random":
__SCREAMING_SNAKE_CASE : Any = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.unknown_index
return new.reshape(_lowercase )
def __magic_name__( self :Any , lowerCAmelCase__ :Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE : str = inds.shape
assert len(_lowercase ) > 1
__SCREAMING_SNAKE_CASE : Dict = inds.reshape(ishape[0] , -1 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.used.to(_lowercase )
if self.re_embed > self.used.shape[0]: # extra token
__SCREAMING_SNAKE_CASE : Optional[int] = 0 # simply set to zero
__SCREAMING_SNAKE_CASE : Optional[int] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _lowercase )
return back.reshape(_lowercase )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[str] ) -> List[Any]:
# reshape z -> (batch, height, width, channel) and flatten
__SCREAMING_SNAKE_CASE : str = z.permute(0 , 2 , 3 , 1 ).contiguous()
__SCREAMING_SNAKE_CASE : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(_lowercase , self.embedding.weight ) , dim=1 )
__SCREAMING_SNAKE_CASE : Dict = self.embedding(_lowercase ).view(z.shape )
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
# compute loss for embedding
if not self.legacy:
__SCREAMING_SNAKE_CASE : Tuple = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__SCREAMING_SNAKE_CASE : str = z + (z_q - z).detach()
# reshape back to match original input shape
__SCREAMING_SNAKE_CASE : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(_lowercase )
__SCREAMING_SNAKE_CASE : Any = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__SCREAMING_SNAKE_CASE : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] ) -> int:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
__SCREAMING_SNAKE_CASE : Any = self.unmap_to_all(_lowercase )
__SCREAMING_SNAKE_CASE : Union[str, Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__SCREAMING_SNAKE_CASE : str = self.embedding(_lowercase )
if shape is not None:
__SCREAMING_SNAKE_CASE : Optional[Any] = z_q.view(_lowercase )
# reshape back to match original input shape
__SCREAMING_SNAKE_CASE : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
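# Minimal sketch of the straight-through estimator used in the quantizer's forward above:
# the forward pass returns the quantized codes, while gradients flow to `z` as if the
# quantization step were the identity. (`z` and `z_q` are hypothetical tensors.)
#
#     z_q_st = z + (z_q - z).detach()  # value equals z_q, gradient equals d/dz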
class _lowercase ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict=False ) -> List[str]:
__SCREAMING_SNAKE_CASE : Dict = parameters
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = torch.chunk(_lowercase , 2 , dim=1 )
__SCREAMING_SNAKE_CASE : List[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
__SCREAMING_SNAKE_CASE : List[Any] = deterministic
__SCREAMING_SNAKE_CASE : Optional[int] = torch.exp(0.5 * self.logvar )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
__SCREAMING_SNAKE_CASE : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :Optional[torch.Generator] = None ) -> List[str]:
        # make sure sample is on the same device as the parameters and has the same dtype
__SCREAMING_SNAKE_CASE : str = randn_tensor(
self.mean.shape , generator=_lowercase , device=self.parameters.device , dtype=self.parameters.dtype )
__SCREAMING_SNAKE_CASE : Optional[int] = self.mean + self.std * sample
return x
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :Tuple=None ) -> Tuple:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __magic_name__( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :str=[1, 2, 3] ) -> Optional[int]:
if self.deterministic:
return torch.Tensor([0.0] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_lowercase )
def __magic_name__( self :List[str] ) -> Any:
return self.mean
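# Usage sketch for the diagonal-Gaussian class above (named `DiagonalGaussianDistribution`
# in the original diffusers source; shapes are hypothetical): a VAE encoder emits
# 2 * latent_channels channels that are chunked into mean/logvar, then sampled with the
# reparameterization trick so gradients flow through mean and std.
#
#     posterior = DiagonalGaussianDistribution(moments)  # moments: (B, 2*C, H, W)
#     z = posterior.sample()                             # mean + std * eps
#     kl = posterior.kl()                                # summed over (C, H, W)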
| 696 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between the model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between the generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
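# Loading sketch for the checkpoint saved above: because a state_dict is saved rather than
# a pickled module, the weights can be restored into a freshly built model without the
# original directory layout. (`config` is a hypothetical BertAbsConfig instance.)
#
#     model = BertAbsSummarizer(config, torch.device("cpu"))
#     model.load_state_dict(
#         torch.load("./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
#     )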
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 118 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : List[str] =MODEL_FOR_CAUSAL_LM_MAPPING
a_ : Optional[int] =TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
_snake_case : Optional[int] = text_generator('This is a test' , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
_snake_case : Any = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
_lowercase , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
_snake_case : List[Any] = text_generator('This is a test' , do_sample=_lowercase , num_return_sequences=2 , return_tensors=_lowercase )
self.assertEqual(
_lowercase , [
{'generated_token_ids': ANY(_lowercase )},
{'generated_token_ids': ANY(_lowercase )},
] , )
_snake_case : Tuple = text_generator.model.config.eos_token_id
_snake_case : Tuple = '<pad>'
_snake_case : Tuple = text_generator(
['This is a test', 'This is a second test'] , do_sample=_lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=_lowercase , )
self.assertEqual(
_lowercase , [
[
{'generated_token_ids': ANY(_lowercase )},
{'generated_token_ids': ANY(_lowercase )},
],
[
{'generated_token_ids': ANY(_lowercase )},
{'generated_token_ids': ANY(_lowercase )},
],
] , )
@require_tf
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
_snake_case : Optional[int] = text_generator('This is a test' , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
_snake_case : str = text_generator(['This is a test', 'This is a second test'] , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def UpperCamelCase_ ( self : Any , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : Optional[int] = TextGenerationPipeline(model=_lowercase , tokenizer=_lowercase )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = 'Hello I believe in'
_snake_case : Tuple = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
_snake_case : List[Any] = text_generator(_lowercase )
self.assertEqual(
_lowercase , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
_snake_case : int = text_generator(_lowercase , stop_sequence=' fe' )
self.assertEqual(_lowercase , [{'generated_text': 'Hello I believe in fe'}] )
def UpperCamelCase_ ( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
_snake_case : str = text_generator.model
_snake_case : List[str] = text_generator.tokenizer
_snake_case : Optional[Any] = text_generator('This is a test' )
self.assertEqual(_lowercase , [{'generated_text': ANY(_lowercase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
_snake_case : Union[str, Any] = text_generator('This is a test' , return_full_text=_lowercase )
self.assertEqual(_lowercase , [{'generated_text': ANY(_lowercase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
_snake_case : Optional[int] = pipeline(task='text-generation' , model=_lowercase , tokenizer=_lowercase , return_full_text=_lowercase )
_snake_case : Union[str, Any] = text_generator('This is a test' )
self.assertEqual(_lowercase , [{'generated_text': ANY(_lowercase )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
_snake_case : Optional[Any] = text_generator('This is a test' , return_full_text=_lowercase )
self.assertEqual(_lowercase , [{'generated_text': ANY(_lowercase )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
_snake_case : Any = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[{'generated_text': ANY(_lowercase )}, {'generated_text': ANY(_lowercase )}],
[{'generated_text': ANY(_lowercase )}, {'generated_text': ANY(_lowercase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_snake_case : Optional[int] = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=_lowercase )
self.assertEqual(
_lowercase , [
[{'generated_text': ANY(_lowercase )}, {'generated_text': ANY(_lowercase )}],
[{'generated_text': ANY(_lowercase )}, {'generated_text': ANY(_lowercase )}],
] , )
with self.assertRaises(_lowercase ):
_snake_case : int = text_generator('test' , return_full_text=_lowercase , return_text=_lowercase )
with self.assertRaises(_lowercase ):
_snake_case : Dict = text_generator('test' , return_full_text=_lowercase , return_tensors=_lowercase )
with self.assertRaises(_lowercase ):
_snake_case : int = text_generator('test' , return_text=_lowercase , return_tensors=_lowercase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_snake_case : Tuple = text_generator('' )
self.assertEqual(_lowercase , [{'generated_text': ANY(_lowercase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_snake_case : List[str] = text_generator('' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and controlling long
            # generation with only max_length would require fancy calculation,
            # so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_snake_case : Union[str, Any] = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_00 , max_new_tokens=20 )
_snake_case : Dict = text_generator('This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_lowercase ):
text_generator(
'This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
_snake_case : Tuple = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_snake_case : Optional[Any] = pipe('This is a test' )
self.assertEqual(
_lowercase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
        # Upgraded those two to real pipeline arguments (they are simply forwarded to the model, as they're unlikely to mean anything else).
_snake_case : Optional[int] = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_snake_case : str = pipe('This is a test' )
self.assertEqual(
_lowercase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_snake_case : Dict = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_snake_case : List[Any] = pipe('This is a test' )
self.assertEqual(
_lowercase , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
import torch
_snake_case : Tuple = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
import torch
_snake_case : str = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=_lowercase , top_p=0.5 )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : List[Any] = 'Hello world'
_snake_case : List[str] = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
_snake_case : Union[str, Any] = logging.get_logger('transformers.generation.tf_utils' )
else:
_snake_case : Dict = logging.get_logger('transformers.generation.utils' )
        _snake_case : Optional[int] = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_lowercase ) as cl:
_snake_case : Union[str, Any] = text_generator(_lowercase , max_length=10 , max_new_tokens=1 )
self.assertIn(_lowercase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_lowercase ) as cl:
_snake_case : List[str] = text_generator(_lowercase , max_new_tokens=1 )
self.assertNotIn(_lowercase , cl.out )
with CaptureLogger(_lowercase ) as cl:
_snake_case : List[str] = text_generator(_lowercase , max_length=10 )
self.assertNotIn(_lowercase , cl.out )
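# Minimal end-user sketch of the API exercised by the tests above (the model name is one of
# the tiny test checkpoints; any causal LM works):
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#     print(generator("Hello I believe in", max_new_tokens=5)[0]["generated_text"])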
| 411 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(snake_case_ ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : Optional[Any] , _lowercase : Dict , _lowercase : Tuple , _lowercase : List[str] , _lowercase : Optional[Any] , _lowercase : Tuple ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def a ( self : str , _lowercase : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json_function(_lowercase )
assert isinstance(_lowercase , _lowercase )
assert isinstance(exported_content[0] , _lowercase )
assert len(_lowercase ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def a ( self : List[Any] , _lowercase : List[str] , _lowercase : str , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , lines=_lowercase , orient=_lowercase , num_proc=2 ).write()
buffer.seek(0 )
__UpperCAmelCase = load_json(_lowercase )
assert isinstance(_lowercase , _lowercase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_lowercase ) == 10
def a ( self : int , _lowercase : Any ):
with pytest.raises(_lowercase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_lowercase , _lowercase , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def a ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : Tuple , _lowercase : Dict , _lowercase : str , _lowercase : str ):
__UpperCAmelCase = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
__UpperCAmelCase = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(_lowercase , _lowercase , compression=_lowercase ).write()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
with fsspec.open(_lowercase , '''rb''' , compression='''infer''' ) as f:
__UpperCAmelCase = f.read()
assert exported_content == original_content
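These tests drive the internal JsonDatasetReader/JsonDatasetWriter directly; a minimal sketch of the same round trip through the public datasets API (the file name is a placeholder) could look like this:

from datasets import Dataset, load_dataset

# Write a tiny dataset as JSON Lines, i.e. one record per line (lines=True above).
ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
ds.to_json("train.jsonl", lines=True)

# Read it back; the schema is inferred unless explicit `features` are passed.
reloaded = load_dataset("json", data_files={"train": "train.jsonl"})
assert reloaded["train"].features["col_2"].dtype == "int64"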
| 49 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
snake_case_ : List[Any] = logging.getLogger(__name__)
def dummy_dataloaders( a=2 , b=3 , batch_size=16 , n_train_batches = 10 , n_valid_batches = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches , 1)
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4)
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4)
    return (train_dataloader, valid_dataloader)
def train( num_epochs , model , dataloader , optimizer , accelerator , scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs , y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random()) # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel( nn.Module ):
def __init__( self : int ) -> List[str]:
"""simple docstring"""
super().__init__()
_SCREAMING_SNAKE_CASE =nn.Parameter(torch.randn(1 ) )
_SCREAMING_SNAKE_CASE =nn.Parameter(torch.randn(1 ) )
    def forward( self : Dict , x : List[Any] ) -> Any:
"""simple docstring"""
return x * self.a + self.b
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =dummy_dataloaders()
_SCREAMING_SNAKE_CASE =ProjectConfiguration(total_limit=1 , project_dir=_lowercase , automatic_checkpoint_naming=_lowercase )
# Train baseline
_SCREAMING_SNAKE_CASE =Accelerator(project_config=_lowercase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =dummy_dataloaders()
# Train baseline
_SCREAMING_SNAKE_CASE =Accelerator()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
_SCREAMING_SNAKE_CASE =os.path.join(_lowercase , '''initial''' )
accelerator.save_state(_lowercase )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
_SCREAMING_SNAKE_CASE =train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
# Train partially
set_seed(42 )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =dummy_dataloaders()
_SCREAMING_SNAKE_CASE =Accelerator()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.load_state(_lowercase )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
# Save everything
_SCREAMING_SNAKE_CASE =os.path.join(_lowercase , '''checkpoint''' )
accelerator.save_state(_lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(_lowercase )
test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =dummy_dataloaders()
_SCREAMING_SNAKE_CASE =ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
# Train baseline
_SCREAMING_SNAKE_CASE =Accelerator(project_dir=_lowercase , project_config=_lowercase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
_SCREAMING_SNAKE_CASE =train(3 , _lowercase , _lowercase , _lowercase , _lowercase )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
# Train partially
set_seed(42 )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =dummy_dataloaders()
_SCREAMING_SNAKE_CASE =ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowercase )
_SCREAMING_SNAKE_CASE =Accelerator(project_dir=_lowercase , project_config=_lowercase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase )
accelerator.load_state(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_0''' ) )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =train(2 , _lowercase , _lowercase , _lowercase , _lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _lowercase , _lowercase , _lowercase , _lowercase )
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =model.a.item(), model.b.item()
_SCREAMING_SNAKE_CASE =optimizer.state_dict()
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =torch.tensor([1, 2, 3] )
_SCREAMING_SNAKE_CASE =torch.tensor([2, 3, 4] )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =torch.optim.Adam(net.parameters() )
_SCREAMING_SNAKE_CASE =Accelerator()
with self.assertRaises(_lowercase ) as ve:
accelerator.register_for_checkpointing(_lowercase , _lowercase , _lowercase , _lowercase )
_SCREAMING_SNAKE_CASE =str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_SCREAMING_SNAKE_CASE =torch.optim.lr_scheduler.StepLR(_lowercase , step_size=1 , gamma=0.99 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =dummy_dataloaders()
_SCREAMING_SNAKE_CASE =ProjectConfiguration(automatic_checkpoint_naming=_lowercase )
# Train baseline
_SCREAMING_SNAKE_CASE =Accelerator(project_dir=_lowercase , project_config=_lowercase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Save initial
accelerator.save_state()
_SCREAMING_SNAKE_CASE =scheduler.state_dict()
train(3 , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
self.assertNotEqual(_lowercase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_lowercase , scheduler.state_dict() )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_SCREAMING_SNAKE_CASE =DummyModel()
_SCREAMING_SNAKE_CASE =ProjectConfiguration(automatic_checkpoint_naming=_lowercase , total_limit=2 )
# Train baseline
_SCREAMING_SNAKE_CASE =Accelerator(project_dir=_lowercase , project_config=_lowercase )
_SCREAMING_SNAKE_CASE =accelerator.prepare(_lowercase )
        # Save 11 states; with total_limit=2 only the last two checkpoints survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_lowercase , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : Union[str, Any] = '/tmp/accelerate/state_checkpointing'
snake_case_ : int = DummyModel()
snake_case_ : str = torch.optim.Adam(params=model.parameters(), lr=1e-3)
snake_case_ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
snake_case_ : Optional[int] = dummy_dataloaders()
snake_case_ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
snake_case_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='''no''')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
snake_case_ : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
snake_case_ : Optional[int] = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
snake_case_ : List[str] = group['params'][0].device
break
assert param_device.type == accelerator.device.type
snake_case_ : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
snake_case_ : Union[str, Any] = group['params'][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
snake_case_ : int = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
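A condensed sketch of the save/load cycle these tests exercise; the project directory and the linear model are stand-ins:

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# automatic_checkpoint_naming writes states to <project_dir>/checkpoints/checkpoint_<i>,
# and total_limit caps how many of those checkpoint folders are kept around.
config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
accelerator = Accelerator(project_dir="runs/demo", project_config=config)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state()  # creates checkpoints/checkpoint_0
accelerator.load_state("runs/demo/checkpoints/checkpoint_0")  # restores model, optimizer and RNG states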
| 691 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
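In short, TextStreamer prints tokens to stdout as they are produced, while TextIteratorStreamer exposes them as a blocking iterator fed from a background generation thread (raising queue.Empty once the timeout elapses). A minimal sketch of the iterator variant:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")

# skip_prompt drops the prompt tokens from the stream; generation runs in a thread
# so the main thread is free to consume the decoded chunks as they arrive.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=10.0)
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
text = "".join(chunk for chunk in streamer)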
| 49 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A_ : int = logging.get_logger(__name__)
A_ : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
A_ : int = {
'yjernite/retribert-base-uncased': 512,
}
A_ : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _lowerCAmelCase( _lowerCAmelCase ):
"""simple docstring"""
a : str =VOCAB_FILES_NAMES
a : Dict =PRETRAINED_VOCAB_FILES_MAP
a : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : str =PRETRAINED_INIT_CONFIGURATION
a : Optional[Any] =RetriBertTokenizer
a : List[Any] =["input_ids", "attention_mask"]
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase="[UNK]" , _lowerCamelCase="[SEP]" , _lowerCamelCase="[PAD]" , _lowerCamelCase="[CLS]" , _lowerCamelCase="[MASK]" , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
UpperCamelCase_: List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , _lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _lowercase ) != tokenize_chinese_chars
):
UpperCamelCase_: Optional[int] = getattr(_lowercase , normalizer_state.pop('type' ) )
UpperCamelCase_: List[Any] = do_lower_case
UpperCamelCase_: Optional[Any] = strip_accents
UpperCamelCase_: Tuple = tokenize_chinese_chars
UpperCamelCase_: Union[str, Any] = normalizer_class(**_lowercase )
UpperCamelCase_: List[Any] = do_lower_case
def _a ( self , _lowerCamelCase , _lowerCamelCase=None ):
UpperCamelCase_: str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self , _lowerCamelCase , _lowerCamelCase = None ):
UpperCamelCase_: Optional[Any] = [self.sep_token_id]
UpperCamelCase_: Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self , _lowerCamelCase , _lowerCamelCase = None ):
UpperCamelCase_: str = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 57 |
"""simple docstring"""
def lowercase__ ( density :float , bulk_modulus :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
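As a quick numeric check: water has a density of about 998 kg/m³ and a bulk modulus of about 2.15 GPa, so the formula gives (2.15e9 / 998) ** 0.5 ≈ 1467.8 m/s, close to the tabulated ~1480 m/s for water at room temperature.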
| 49 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
snake_case__ , snake_case__ = generate_datasets(
snake_case_ , snake_case_ , number=snake_case_ , min_len=1026 , trim=snake_case_ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
snake_case__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
    snake_case__ = load_gpt2("""gpt2""" ).to(snake_case_ )
print("""computing perplexity on objective set""" )
snake_case__ = compute_perplexity(snake_case_ , snake_case_ , snake_case_ ).item()
print("""perplexity on objective set:""" , snake_case_ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def training_secondary_learner( secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
set_seed(42 )
# Load pre-trained model
    snake_case__ = GPT2LMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
snake_case__ = SecondaryLearner(snake_case_ )
# Train secondary learner
snake_case__ = train_secondary_learner(
snake_case_ , snake_case_ , max_epochs=snake_case_ , batch_size=snake_case_ , eval_freq=100 , igf_model_path=snake_case_ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def finetune( model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
snake_case__ = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
snake_case__ = RandomSampler(snake_case_ )
snake_case__ = DataLoader(snake_case_ , sampler=snake_case_ )
snake_case__ = max_steps // (len(snake_case_ )) + 1
snake_case__ = 0
snake_case__ = torch.zeros((1, context_len) , dtype=torch.long , device=snake_case_ )
snake_case__ , snake_case__ , snake_case__ = recopy_model(snake_case_ , snake_case_ , snake_case_ )
model.train()
if secondary_learner is not None:
secondary_learner.to(snake_case_ )
secondary_learner.eval()
snake_case__ = []
snake_case__ = 0
snake_case__ = []
snake_case__ = []
# Compute the performance of the transformer model at the beginning
snake_case__ = compute_perplexity(snake_case_ , snake_case_ , snake_case_ )
test_perps.append(snake_case_ )
print("""Test perplexity, step""" , snake_case_ , """:""" , snake_case_ )
for epoch in range(int(snake_case_ ) ):
for step, example in enumerate(snake_case_ ):
torch.cuda.empty_cache()
snake_case__ = random.randint(0 , example.size(2 ) - context_len - 1 )
snake_case__ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
snake_case__ = model(snake_case_ , labels=snake_case_ )
snake_case__ = True
if secondary_learner is not None:
snake_case__ = secondary_learner.forward(
torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(snake_case_ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
snake_case__ = -1
if predicted_q < threshold:
snake_case__ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
snake_case__ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
snake_case__ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
snake_case__ = compute_perplexity(snake_case_ , snake_case_ , snake_case_ )
test_perps.append(snake_case_ )
print("""Test perplexity, step""" , snake_case_ , """:""" , snake_case_ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , snake_case_ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main( ):
snake_case__ = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=snake_case_ , default=snake_case_ , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=snake_case_ , default=snake_case_ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=snake_case_ , type=snake_case_ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=snake_case_ , default=snake_case_ , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=snake_case_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=100 , type=snake_case_ , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=100 , type=snake_case_ , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1000 , type=snake_case_ , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=128 , type=snake_case_ , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=snake_case_ , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=snake_case_ , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=100 , type=snake_case_ , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1026 , type=snake_case_ , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=snake_case_ , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=snake_case_ , type=snake_case_ , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=snake_case_ , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=snake_case_ , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=snake_case_ , type=snake_case_ , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=snake_case_ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
snake_case__ = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
snake_case__ = training_secondary_learner(
snake_case_ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
    snake_case__ = GPT2LMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
snake_case__ , snake_case__ = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1026 , trim=snake_case_ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
snake_case_ , snake_case_ , snake_case_ , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=snake_case_ , secondary_learner=snake_case_ , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main()
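Stripped of the training plumbing, the filtering step inside the loop above is small: the secondary learner scores each sampled context, and only contexts whose predicted information gain clears the threshold (decayed from 1.0 to -1 after the first ten batches) contribute to the language-model update. A schematic sketch, with the hypothetical score_context standing in for SecondaryLearner.forward:

def filtered_contexts(contexts, score_context, threshold=1.0):
    # Keep only contexts the secondary learner predicts to be informative enough;
    # the full script lowers `threshold` to -1 once ten batches have been seen.
    return [context for context in contexts if score_context(context) >= threshold]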
| 654 |
"""simple docstring"""
def check_cycle(graph: dict ) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
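For example, check_cycle({0: [1], 1: [2], 2: [0]}) returns True because the edge 2 -> 0 points back into the recursion stack, while check_cycle({0: [1], 1: [2], 2: []}) returns False.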
| 49 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job( job ):
    """simple docstring"""
    job_info = {}
    start = job["""started_at"""]
    end = job["""completed_at"""]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["""started_at"""] = start
    job_info["""completed_at"""] = end
    job_info["""duration"""] = duration_in_min
    return job_info
def get_job_time( workflow_run_id , token=None ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" , headers=headers ).json()
            job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        return job_time
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
        return {}
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
__UpperCamelCase : Tuple = parser.parse_args()
__UpperCamelCase : List[str] = get_job_time(args.workflow_run_id)
__UpperCamelCase : List[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
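The duration computation reduces to parsing the two ISO-8601 timestamps GitHub returns and rounding the difference to minutes, e.g.:

import dateutil.parser as date_parser

start = date_parser.parse("2023-01-01T10:00:00Z")
end = date_parser.parse("2023-01-01T10:42:30Z")
print(round((end - start).total_seconds() / 60.0))  # 42 (Python rounds the .5 to the even side)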
| 519 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['PoolFormerFeatureExtractor']
_lowercase : Any = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
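The _LazyModule helper is internal to transformers, but the same deferred-import effect can be approximated in any package with a module-level __getattr__ (PEP 562); a minimal sketch with an illustrative layout:

# mypackage/__init__.py -- illustrative only
import importlib

_LAZY_ATTRS = {"PoolFormerConfig": ".configuration_poolformer"}

def __getattr__(name):
    # Import the submodule only when one of its symbols is first accessed.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")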
| 49 | 0 |
"""simple docstring"""
def multiplicative_persistence( num ):
    '''simple docstring'''
    if not isinstance(num , int ):
        raise ValueError('multiplicative_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num ):
    '''simple docstring'''
    if not isinstance(num , int ):
        raise ValueError('additive_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
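Worked examples: the multiplicative persistence of 39 is 3 (39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4), and the additive persistence of 199 is also 3 (199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1).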
| 259 |
"""simple docstring"""
def compute_ap( l :dict ): # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
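For this adjacency list the program prints the cut vertices 2, 3 and 5: removing vertex 2 splits off {0, 1}, removing 3 isolates 4, and removing 5 disconnects the cycle {6, 7, 8}.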
| 49 | 0 |
'''simple docstring'''
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal( decimal : float) -> str:
    '''simple docstring'''
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal , remainder = divmod(decimal , 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
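Worked example: 589 = 2*16^2 + 4*16 + 13, so successive divmod steps emit 'd', '4' and '2' and the function returns '0x24d'; a negative input such as -256 yields '-0x100'.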
| 125 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "EncodecFeatureExtractor"
a__ : Tuple = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
__UpperCAmelCase = self.feature_extractor
__UpperCAmelCase = False
def a ( self : List[str] , _lowercase : List[Any]=None , _lowercase : List[str]=None , _lowercase : Any=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_lowercase , language=_lowercase , no_timestamps=_lowercase )
def __call__( self : Any , *_lowercase : Optional[Any] , **_lowercase : Union[str, Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__UpperCAmelCase = self.tokenizer(_lowercase , **_lowercase )
if audio is not None:
__UpperCAmelCase = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__UpperCAmelCase = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__UpperCAmelCase = audio_inputs['''padding_mask''']
return inputs
def a ( self : str , *_lowercase : Dict , **_lowercase : List[str] ):
__UpperCAmelCase = kwargs.pop('''audio''' , _lowercase )
__UpperCAmelCase = kwargs.pop('''padding_mask''' , _lowercase )
if len(_lowercase ) > 0:
__UpperCAmelCase = args[0]
__UpperCAmelCase = args[1:]
if audio_values is not None:
return self._decode_audio(_lowercase , padding_mask=_lowercase )
else:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a ( self : Union[str, Any] , *_lowercase : int , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : List[Any] , _lowercase : Optional = None ):
__UpperCAmelCase = to_numpy(_lowercase )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = audio_values.shape
if padding_mask is None:
return list(_lowercase )
__UpperCAmelCase = to_numpy(_lowercase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__UpperCAmelCase = seq_len - padding_mask.shape[-1]
__UpperCAmelCase = 1 - self.feature_extractor.padding_value
__UpperCAmelCase = np.pad(_lowercase , ((0, 0), (0, difference)) , '''constant''' , constant_values=_lowercase )
__UpperCAmelCase = audio_values.tolist()
for i in range(_lowercase ):
__UpperCAmelCase = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__UpperCAmelCase = sliced_audio.reshape(_lowercase , -1 )
return audio_values
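The trimming in _decode_audio comes down to one boolean-mask slice: after the padding mask is right-padded with the non-padding token up to the generated length, only positions whose mask value differs from the feature extractor's padding value are kept. A small numpy sketch with illustrative values:

import numpy as np

padding_value = 0.0
audio = np.array([[0.1, 0.2, 0.3, 0.0, 0.0]])  # one mono example, padded to length 5
mask = np.array([[1.0, 1.0, 1.0, 0.0, 0.0]])   # entries equal to padding_value mark padding

# Keep only the real samples, then restore the (channels, samples) layout.
trimmed = audio[0][mask[0] != padding_value].reshape(1, -1)
print(trimmed)  # [[0.1 0.2 0.3]]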
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 401 |
"""simple docstring"""
def lowercase__ ( a :str , b :str ):
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
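This is the classic abbreviation dynamic program: dp[i][j] is True when the first i characters of a can be turned into the first j characters of b by uppercasing some lowercase letters and deleting the remaining lowercase ones. For example a = "daBcd", b = "ABC" yields True (uppercase 'a' and 'c', keep 'B', delete both 'd's), while a = "dBcd", b = "ABC" yields False because nothing can supply the leading 'A'.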
| 49 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = None
lowerCamelCase__ = None
@property
def _snake_case ( self : Dict ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _snake_case ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowercase , "feature_size" ) )
self.assertTrue(hasattr(_lowercase , "sampling_rate" ) )
self.assertTrue(hasattr(_lowercase , "padding_value" ) )
def _snake_case ( self : Optional[int] ):
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowercase ) == len(_lowercase ) for x, y in zip(_lowercase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowercase )
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _snake_case ( self : Tuple , __lowerCamelCase : Dict=False ):
def _inputs_have_equal_length(__lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = len(input[0] )
for input_slice in input[1:]:
if len(_lowercase ) != length:
return False
return True
def _inputs_are_equal(__lowerCamelCase : Any , __lowerCamelCase : int ):
if len(_lowercase ) != len(_lowercase ):
return False
for input_slice_a, input_slice_a in zip(_lowercase , _lowercase ):
if not np.allclose(np.asarray(_lowercase ) , np.asarray(_lowercase ) , atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowercase )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE = self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE = self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE = self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE = feat_extract.pad(_lowercase , padding=_lowercase )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_lowercase , padding="longest" )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_lowercase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_lowercase , padding="longest" , return_tensors="np" )
SCREAMING_SNAKE_CASE = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowercase ):
feat_extract.pad(_lowercase , padding="max_length" )[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_lowercase , padding="max_length" , max_length=_lowercase , return_tensors="np" )
SCREAMING_SNAKE_CASE = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_have_equal_length(_lowercase ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE = feat_extract.pad(_lowercase , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_lowercase , padding="longest" , pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_lowercase , padding="max_length" , pad_to_multiple_of=10 , max_length=_lowercase )
SCREAMING_SNAKE_CASE = input_a[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(
_lowercase , padding="max_length" , pad_to_multiple_of=10 , max_length=_lowercase , return_tensors="np" , )
SCREAMING_SNAKE_CASE = input_a[input_name]
self.assertTrue(all(len(_lowercase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowercase , _lowercase ) )
SCREAMING_SNAKE_CASE = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowercase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _snake_case ( self : Optional[int] , __lowerCamelCase : List[str]=False ):
def _inputs_have_equal_length(__lowerCamelCase : Dict ):
SCREAMING_SNAKE_CASE = len(input[0] )
for input_slice in input[1:]:
if len(_lowercase ) != length:
return False
return True
def _inputs_are_equal(__lowerCamelCase : Any , __lowerCamelCase : Any ):
if len(_lowercase ) != len(_lowercase ):
return False
for input_slice_a, input_slice_a in zip(_lowercase , _lowercase ):
if not np.allclose(np.asarray(_lowercase ) , np.asarray(_lowercase ) , atol=1e-3 ):
return False
return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )[input_name]
        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))[
            input_name
        ]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to middle
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )[input_name]
        input_2 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )[input_name]
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )[input_name]

        self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_are_equal(input_1, input_2))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_3))
        self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )[input_name]
        input_2 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
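

# Editorial sketch: the rounding rule behind the pad_to_multiple_of assertions above,
# extracted into a standalone helper (the names here are assumptions, not test-suite API).
def _expected_padded_length(length: int, multiple: int) -> int:
    # round `length` up to the next multiple of `multiple`
    return length if length % multiple == 0 else (length // multiple + 1) * multiple


assert _expected_padded_length(37, 12) == 48
assert _expected_padded_length(48, 12) == 48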
| 16 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
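

# Editorial sketch: a single-process walk through the scheduler above, to make the
# queue hand-off concrete (assumes only the Process/MLFQ classes in this module).
# A burst of 53 with time slices [17, 25] runs 17 units in queue 0, 25 units in
# queue 1, and the remaining 11 units in the final FCFS queue.
if __name__ == "__main__":
    demo = MLFQ(3, [17, 25], deque([Process("P1", 0, 53)]), 0)
    demo.multi_level_feedback_queue()
    print(demo.calculate_sequence_of_finish_queue())  # ['P1']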
| 49 | 0 |
from __future__ import annotations
def p_series(nth_term, power):
    # Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings.
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =int(input('Enter the last number (nth term) of the P-Series'))
__lowerCAmelCase : Dict =int(input('Enter the power for P-Series'))
print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
print(p_series(nth_term, power))
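

# Editorial sketch: the expected output of p_series for a small input
# (this check is an addition, not part of the original module).
if __name__ == "__main__":
    assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]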
| 696 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout

class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
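

# Editorial sketch: minimal usage of the config above; the override value is
# illustrative only (any architecture hyperparameter can be overridden this way).
if __name__ == "__main__":
    config = CamembertConfig(num_hidden_layers=6)
    print(config.hidden_size)  # 768, the default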
| 49 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase="cpu" , _lowerCamelCase=torch.floataa , _lowerCamelCase=0 ) -> List[Any]:
'''simple docstring'''
__lowercase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__lowercase = np.random.RandomState(_lowercase ).standard_normal((1, 4, 64, 64) )
__lowercase = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
__lowercase = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
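

# Editorial sketch: the slice pattern used by the regression checks above.
# image[0, -3:, -3:, -1] takes the bottom-right 3x3 patch of the last channel
# of the first image in the batch (the shapes here are illustrative).
def _slice_pattern_demo():
    image = np.zeros((1, 512, 512, 3))
    image_slice = image[0, -3:, -3:, -1]
    assert image_slice.shape == (3, 3)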
| 118 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
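
    # Editorial sketch: a non-interactive check of the sort above.
    demo = [5, 2, 9, 1]
    rec_insertion_sort(demo, len(demo))
    assert demo == [1, 2, 5, 9]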
| 49 | 0 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
def __init__( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str]=13 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=99 , UpperCamelCase : Union[str, Any]=64 , UpperCamelCase : int=32 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : List[str]=4 , UpperCamelCase : Optional[Any]=37 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Any=5_12 , UpperCamelCase : Tuple=16 , UpperCamelCase : List[str]=2 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Tuple=3 , UpperCamelCase : Any=4 , UpperCamelCase : List[str]=None , ):
'''simple docstring'''
_snake_case : Union[str, Any] = parent
_snake_case : Optional[Any] = batch_size
_snake_case : Dict = seq_length
_snake_case : Tuple = is_training
_snake_case : int = use_input_mask
_snake_case : List[Any] = use_token_type_ids
_snake_case : int = use_labels
_snake_case : List[str] = vocab_size
_snake_case : int = hidden_size
_snake_case : Dict = embedding_size
_snake_case : List[str] = num_hidden_layers
_snake_case : str = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : Optional[int] = hidden_act
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : Dict = attention_probs_dropout_prob
_snake_case : Any = max_position_embeddings
_snake_case : Dict = type_vocab_size
_snake_case : Optional[Any] = type_sequence_label_size
_snake_case : Optional[int] = initializer_range
_snake_case : int = num_labels
_snake_case : int = num_choices
_snake_case : Optional[int] = scope
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Any = None
if self.use_input_mask:
_snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : List[Any] = None
if self.use_token_type_ids:
_snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : List[str] = None
_snake_case : Any = None
_snake_case : str = None
if self.use_labels:
_snake_case : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : int = MegatronBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : Tuple = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
_snake_case : List[Any] = model(_lowercase , token_type_ids=_lowercase )
_snake_case : Optional[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : str = MegatronBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : Optional[Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = MegatronBertForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Any ):
'''simple docstring'''
_snake_case : List[Any] = MegatronBertForNextSentencePrediction(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : List[str] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = MegatronBertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : int = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , next_sentence_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : int = MegatronBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : Any = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : int , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : Any = MegatronBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Dict ):
'''simple docstring'''
_snake_case : int = self.num_labels
_snake_case : int = MegatronBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = self.num_choices
_snake_case : Optional[int] = MegatronBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : Optional[Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ : List[str] =(
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a_ : Union[str, Any] =(
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Optional[Any] =True
# test_resize_embeddings = False
a_ : Dict =False
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str]=False ):
'''simple docstring'''
_snake_case : List[str] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
_snake_case : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
_snake_case : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = MegatronBertModelTester(self )
_snake_case : Any = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowercase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowercase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowercase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowercase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('Model is not available.' )
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
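

# Editorial sketch: the unsqueeze/expand pattern the multiple-choice tester above
# uses to repeat each (batch, seq) tensor once per answer choice (sizes assumed).
def _multiple_choice_expand_demo():
    input_ids = torch.zeros(2, 7, dtype=torch.long)  # (batch, seq)
    mc_input_ids = input_ids.unsqueeze(1).expand(-1, 4, -1).contiguous()  # (batch, num_choices, seq)
    assert mc_input_ids.shape == (2, 4, 7)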
| 411 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
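

# Editorial sketch: the per-device generator pattern these pipeline tests rely on
# (MPS only accepts CPU-seeded generators); the helper name is an assumption.
def _make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)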
| 49 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
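

# Editorial sketch: the binary inpainting masks in the tests above are plain float
# arrays, ones everywhere with one region zeroed out; which value marks the region
# to repaint is pipeline-specific, and the region below is illustrative only.
def _inpaint_mask_demo():
    mask = np.ones((64, 64), dtype=np.float32)
    mask[:32, :32] = 0.0
    return mask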
| 691 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
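
    # Minimal usage sketch of viterbi() above, using the classic two-state
    # "healthy"/"fever" HMM. The names and probabilities are illustrative toy
    # values, not part of the original module.
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "fever"]
    start_p = {"healthy": 0.6, "fever": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "fever": 0.3},
        "fever": {"healthy": 0.4, "fever": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # -> ['healthy', 'healthy', 'fever']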
| 49 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.'
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 57 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest ( TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
snake_case__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
snake_case__ = os.path.join(self.tmpdirname , """dpr_tokenizer""")
os.makedirs(_lowercase , exist_ok=_lowercase)
snake_case__ = os.path.join(_lowercase , DPR_VOCAB_FILES_NAMES["""vocab_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
# BART tok
snake_case__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ = dict(zip(_lowercase , range(len(_lowercase))))
snake_case__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ = {"""unk_token""": """<unk>"""}
snake_case__ = os.path.join(self.tmpdirname , """bart_tokenizer""")
os.makedirs(_lowercase , exist_ok=_lowercase)
snake_case__ = os.path.join(_lowercase , BART_VOCAB_FILES_NAMES["""vocab_file"""])
snake_case__ = os.path.join(_lowercase , BART_VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
fp.write(json.dumps(_lowercase) + """\n""")
with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
fp.write("""\n""".join(_lowercase))
    def get_dpr_tokenizer(self):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer"""))
    def get_bart_tokenizer(self):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer"""))
    def tearDown(self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
'''simple docstring'''
snake_case__ = os.path.join(self.tmpdirname , """rag_tokenizer""")
snake_case__ = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
snake_case__ = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(_lowercase)
rag_tokenizer.save_pretrained(_lowercase)
snake_case__ = RagTokenizer.from_pretrained(_lowercase , config=_lowercase)
self.assertIsInstance(new_rag_tokenizer.question_encoder , _lowercase)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , _lowercase)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
'''simple docstring'''
snake_case__ = RagTokenizer.from_pretrained("""facebook/rag-token-nq""")
snake_case__ = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
'''simple docstring'''
snake_case__ = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""")
snake_case__ = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 654 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        sas_model = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        sas_model.load_state_dict(save_dict['model'])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')['train']
        wikiaab_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r',
            shape=(wikiaab_passages.num_rows, 128), )
        wikiaab_index_flat = faiss.IndexFlatIP(128)
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat)
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps)  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    elia = datasets.load_dataset('eli5', name='LFQA_reddit')
    elia_train = elia['train_eli5']
    elia_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(elia_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(elia_train_q_reps)
    return (elia_train, eli5_train_q_index)
wikiaab_passages, wikiaab_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
elia_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [elia_train[int(i)] for i in I[0]]
    return nn_examples
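

# Sketch of the retrieval pattern above with toy data (illustrative, not from the
# source): faiss.IndexFlatIP performs exact max-inner-product search, so for
# normalized embeddings it ranks by cosine similarity.
#   import numpy as np, faiss
#   index = faiss.IndexFlatIP(128)
#   index.add(np.random.rand(1000, 128).astype("float32"))   # training reps
#   D, I = index.search(np.random.rand(1, 128).astype("float32"), 10)
#   # I[0] holds the row ids of the 10 nearest training questions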
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
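

# e.g. question_doc == "question: why is the sky blue? context: <P> passage one <P> passage two"
# (illustrative strings; this is just the shape produced by the format() call above).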
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1_024,
            device='cuda:0', )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
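

# Worked shape example (using the tester defaults above, which are illustrative):
# with image_size=32 and hidden_sizes[-1]=40, the model's last hidden state is
# (batch_size, 40, 32 // 32, 32 // 32) == (batch_size, 40, 1, 1), matching the
# B x C x H/32 x W/32 convention checked in create_and_check_model().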
@require_tf
class TFResNetModelTest ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
'''simple docstring'''
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
    def test_inputs_embeds(self):
'''simple docstring'''
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
    def test_model_common_attributes(self):
'''simple docstring'''
pass
    def test_forward_signature(self):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : int = model_class(_lowercase )
__lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : str = [*signature.parameters.keys()]
__lowerCamelCase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
    def test_model(self):
'''simple docstring'''
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
    def test_hidden_states_output(self):
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ):
__lowerCamelCase : Tuple = model_class(_lowercase )
__lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
__lowerCamelCase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCamelCase : int = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Optional[Any] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCamelCase : Union[str, Any] = layer_type
__lowerCamelCase : List[str] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Optional[Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
    def test_for_image_classification(self):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
    def test_model_from_pretrained(self):
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Union[str, Any] = TFResNetModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
'''simple docstring'''
__lowerCamelCase : Dict = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowerCamelCase : List[str] = self.default_image_processor
__lowerCamelCase : List[Any] = prepare_img()
__lowerCamelCase : str = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
__lowerCamelCase : Any = model(**_lowercase )
# verify the logits
__lowerCamelCase : List[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowercase )
__lowerCamelCase : Union[str, Any] = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowercase , atol=1E-4 ) )
| 519 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
    def test_stable_diffusion_cycle(self):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_cycle_fp16(self):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
    def test_save_load_local(self):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
    def test_inference_batch_single_identical(self):
return super().test_inference_batch_single_identical()
@skip_mps
    def test_dict_tuple_outputs_equivalent(self):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_optional_components(self):
return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass(self):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
    def test_cycle_diffusion_pipeline(self):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
_lowerCAmelCase = {
'gpt-neox-20b': 2_0_4_8,
}
class GPTNeoXTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type') )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id] )
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
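

# Worked example of the conversation encoding above (illustrative strings and ids):
# for a two-turn conversation with user text "Hi" and bot text "Hello",
# _build_conversation_input_ids returns
#   encode("Hi") + [eos_token_id] + encode("Hello") + [eos_token_id]
# truncated, if needed, to the last model_max_length ids.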
| 259 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {'vocab_file': 'sentencepiece.model'}
_lowercase : Tuple = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
_lowercase : List[str] = {
'google/rembert': 2_56,
}
class RemBertTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory) )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
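

# Worked example for the special-token helpers above (illustrative ids): with
# token_ids_0 == [5, 6] and token_ids_1 == [7],
#   build_inputs_with_special_tokens     -> [cls_id, 5, 6, sep_id, 7, sep_id]
#   create_token_type_ids_from_sequences -> [0, 0, 0, 0, 1, 1]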
| 49 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests (
    PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
_lowercase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
_lowercase : str = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=_lowercase ,projection_dim=_lowercase ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) )
torch.manual_seed(0 )
_lowercase : Optional[int] = PriorTransformer(
num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=_lowercase ,num_layers=1 ,)
torch.manual_seed(0 )
_lowercase : Any = DDPMScheduler(
variance_type='fixed_small_log' ,prediction_type='sample' ,num_train_timesteps=1000 ,clip_sample=_lowercase ,clip_sample_range=5.0 ,beta_schedule='squaredcos_cap_v2' ,)
# regular denoising components
torch.manual_seed(0 )
_lowercase : List[Any] = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
_lowercase : Tuple = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
_lowercase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
_lowercase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=_lowercase ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) )
torch.manual_seed(0 )
_lowercase : Any = UNetaDConditionModel(
sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type='projection' ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=_lowercase ,layers_per_block=1 ,upcast_attention=_lowercase ,use_linear_projection=_lowercase ,)
torch.manual_seed(0 )
_lowercase : str = DDIMScheduler(
beta_schedule='scaled_linear' ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,prediction_type='v_prediction' ,set_alpha_to_one=_lowercase ,steps_offset=1 ,)
torch.manual_seed(0 )
_lowercase : Optional[Any] = AutoencoderKL()
_lowercase : str = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
    def get_dummy_inputs(self, device, seed=0):
if str(_lowercase ).startswith('mps' ):
_lowercase : Tuple = torch.manual_seed(_lowercase )
else:
_lowercase : str = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
_lowercase : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : Any ) -> Dict:
_lowercase : Optional[int] = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def _lowerCamelCase ( self : int ) -> Optional[Any]:
_lowercase : Dict = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self : Any ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Any ) -> Union[str, Any]:
_lowercase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
_lowercase : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowercase : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowercase : str = pipe('anime turle' ,generator=_lowercase ,output_type='np' )
_lowercase : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowercase ,_lowercase )
def _lowerCamelCase ( self : Any ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowercase : str = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' ,torch_dtype=torch.floataa )
_lowercase : List[Any] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_lowercase : List[Any] = pipe(
'anime turtle' ,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type='np' ,)
_lowercase : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
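        # For reference, a minimal self-contained sketch of the peak-VRAM measurement
        # pattern this test relies on (standard torch.cuda utilities; the workload and
        # the 7 GB budget are illustrative):
        #
        #   torch.cuda.empty_cache()
        #   torch.cuda.reset_peak_memory_stats()
        #   run_pipeline()                                  # hypothetical workload
        #   peak_bytes = torch.cuda.max_memory_allocated()  # bytes since last reset
        #   assert peak_bytes < 7 * 10**9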
| 125 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : List[Any] = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
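# For reference, a minimal self-contained sketch of the lazy-import idea behind
# _LazyModule, written with PEP 562 module-level __getattr__ (names illustrative;
# the real implementation lives in transformers.utils):
#
#   import importlib
#
#   _LAZY = {"VivitConfig": ".configuration_vivit", "VivitModel": ".modeling_vivit"}
#
#   def __getattr__(name):
#       if name in _LAZY:
#           submodule = importlib.import_module(_LAZY[name], __name__)
#           return getattr(submodule, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")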
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['LayoutLMv2FeatureExtractor']
_SCREAMING_SNAKE_CASE = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 401 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : List[Any] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
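# Example of an argument combination that satisfies the checks above (values illustrative;
# required paths such as --dump_path/--data_file/--teacher_name omitted for brevity):
#   --mlm --alpha_mlm 0.5 --alpha_clm 0.0 --student_type distilbert --teacher_type bert \
#   --token_counts token_counts.distilbert.pkl --student_config distilbert.json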
def lowercase__ ( snake_case_ :int , snake_case_ :Dict ):
if args.student_type == "roberta":
__UpperCAmelCase = False
elif args.student_type == "gpt2":
__UpperCAmelCase = False
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Union[str, Any] ):
if args.student_type == "roberta":
__UpperCAmelCase = False
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case_ , required=snake_case_ , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=snake_case_ , required=snake_case_ , help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
        '''--student_type''' , type=snake_case_ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''The student type (DistilBERT, RoBERTa, GPT-2).''' , )
parser.add_argument('''--student_config''' , type=snake_case_ , required=snake_case_ , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case_ , type=snake_case_ , help='''Load student initialization checkpoint.''' )
parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case_ , help='''Teacher type (BERT, RoBERTa, GPT-2).''' )
parser.add_argument('''--teacher_name''' , type=snake_case_ , required=snake_case_ , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=snake_case_ , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case_ , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case_ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case_ , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case_ , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case_ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case_ , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case_ , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case_ , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=snake_case_ , help='''Smoothing parameter to emphasize rarer tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case_ , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case_ , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case_ , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
        '''--group_by_size''' , action='''store_false''' , help='''Passing this flag disables grouping sequences of similar length into the same batch (grouping is on by default).''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case_ , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case_ , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case_ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case_ , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case_ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case_ , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case_ , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case_ , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case_ , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case_ , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case_ , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case_ , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case_ , default=4_000 , help='''Checkpoint interval.''' )
__UpperCAmelCase = parser.parse_args()
sanity_checks(snake_case_ )
# ARGS #
init_gpu_params(snake_case_ )
set_seed(snake_case_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ''' it. Use `--force` if you want to overwrite it.''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case_ ) , snake_case_ , indent=4 )
git_log(args.dump_path )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.student_type]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__UpperCAmelCase = tokenizer.all_special_tokens.index(snake_case_ )
__UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__UpperCAmelCase = special_tok_ids
__UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__UpperCAmelCase = pickle.load(snake_case_ )
__UpperCAmelCase = np.maximum(snake_case_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__UpperCAmelCase = 0.0 # do not predict special tokens
__UpperCAmelCase = torch.from_numpy(snake_case_ )
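        # e.g. with the default mlm_smoothing=0.7, a token counted 10_000 times gets
        # weight 10_000 ** -0.7 ≈ 0.0016 while a token counted 10 times gets
        # 10 ** -0.7 ≈ 0.1995, so rarer tokens are selected for masking far more often.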
else:
__UpperCAmelCase = None
__UpperCAmelCase = LmSeqsDataset(params=snake_case_ , data=snake_case_ )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
__UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case_ )
else:
__UpperCAmelCase = student_model_class(snake_case_ )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case_ )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case_ , snake_case_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case_ , snake_case_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__UpperCAmelCase = Distiller(
params=snake_case_ , dataset=snake_case_ , token_probs=snake_case_ , student=snake_case_ , teacher=snake_case_ )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 49 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__A : Dict = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None , __lowerCamelCase : str=1 ):
SCREAMING_SNAKE_CASE = tokenizer
SCREAMING_SNAKE_CASE = dataset
SCREAMING_SNAKE_CASE = len(_lowercase ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE = n_copies
def __iter__( self : int ):
SCREAMING_SNAKE_CASE = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
SCREAMING_SNAKE_CASE = self.tokenizer(_lowercase , padding=_lowercase , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = start_length
SCREAMING_SNAKE_CASE = eof_strings
SCREAMING_SNAKE_CASE = tokenizer
def __call__( self : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_lowercase )
def __a ( A__ : List[Any] ):
SCREAMING_SNAKE_CASE = re.split("(%s)" % "|".join(snake_case_ ) , snake_case_ )
# last string should be ""
return "".join(string_list[:-2] )
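# A minimal illustration of the re.split behavior remove_last_block relies on:
# a capturing group keeps the delimiters in the result (strings hypothetical):
#   re.split("(\ndef)", "head\ndef a\ndef b") -> ["head", "\ndef", " a", "\ndef", " b"]
# so joining string_list[:-2] drops the final, typically unfinished, block.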
def __a ( A__ : Optional[int] , A__ : Any , A__ : str , A__ : List[str] , A__ : Any , A__ : Tuple=20 , **A__ : int ):
SCREAMING_SNAKE_CASE = defaultdict(snake_case_ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(snake_case_ ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE = batch["ids"].shape[-1]
SCREAMING_SNAKE_CASE = accelerator.unwrap_model(snake_case_ ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=snake_case_ , **snake_case_ )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE = batch["task_id"].repeat(snake_case_ )
SCREAMING_SNAKE_CASE = accelerator.pad_across_processes(
snake_case_ , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(snake_case_ , snake_case_ ):
gen_token_dict[task].append(snake_case_ )
SCREAMING_SNAKE_CASE = [[] for _ in range(snake_case_ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
code_gens[task].append(remove_last_block(snake_case_ ) )
return code_gens
def __a ( ):
# Setup configuration
SCREAMING_SNAKE_CASE = HfArgumentParser(snake_case_ )
SCREAMING_SNAKE_CASE = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE = "false"
if args.num_workers is None:
SCREAMING_SNAKE_CASE = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE = Accelerator()
set_seed(args.seed , device_specific=snake_case_ )
# Load model and tokenizer
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE = tokenizer.eos_token
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , snake_case_ , snake_case_ )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE = load_dataset("openai_humaneval" )
SCREAMING_SNAKE_CASE = load_metric("code_eval" )
SCREAMING_SNAKE_CASE = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
SCREAMING_SNAKE_CASE = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE = TokenizedDataset(snake_case_ , human_eval["test"] , n_copies=snake_case_ , n_tasks=snake_case_ )
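    # e.g. n_samples=200 with batch_size=10 gives n_copies=20: each task is queued 20
    # times and each generate() call returns 10 sequences, i.e. 200 candidates per task.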
    # note: args.batch_size is used as num_return_sequences per generate() call, so the DataLoader batch size is 1
SCREAMING_SNAKE_CASE = DataLoader(snake_case_ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE = complete_code(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , n_tasks=snake_case_ , batch_size=args.batch_size , **snake_case_ , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE = []
for task in tqdm(range(snake_case_ ) ):
SCREAMING_SNAKE_CASE = human_eval["test"][task]["test"]
SCREAMING_SNAKE_CASE = F"check({human_eval['test'][task]['entry_point']})"
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = code_eval_metric.compute(
references=snake_case_ , predictions=snake_case_ , num_workers=args.num_workers )
print(F"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(snake_case_ , snake_case_ )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
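# Note: the "code_eval" metric reports pass@k using the unbiased estimator from the
# Codex paper, E[1 - C(n - c, k) / C(n, k)] over tasks, where c of the n generated
# samples pass the tests (stated here for reference; computed inside the metric).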
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
__SCREAMING_SNAKE_CASE : str = (low + high) // 2
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = max_subarray(snake_case_ , snake_case_ , snake_case_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = max_subarray(snake_case_ , mid + 1 , snake_case_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = max_cross_sum(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
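# Worked example: for arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4], max_subarray(arr, 0, 8)
# returns (3, 6, 6): the maximum-sum slice is arr[3:7] = [4, -1, 2, 1] with sum 6.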
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = float('''-inf''' ), -1
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = float('''-inf''' ), -1
__SCREAMING_SNAKE_CASE : Any = 0
for i in range(snake_case_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
__SCREAMING_SNAKE_CASE : int = summ
__SCREAMING_SNAKE_CASE : int = i
__SCREAMING_SNAKE_CASE : int = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
__SCREAMING_SNAKE_CASE : List[str] = summ
__SCREAMING_SNAKE_CASE : Tuple = i
return max_left, max_right, (left_sum + right_sum)
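# Complexity note: max_subarray satisfies T(n) = 2*T(n/2) + Theta(n), where the
# Theta(n) term is this linear cross-sum scan, giving Theta(n log n) overall.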
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : str = [randint(1 , snake_case_ ) for _ in range(snake_case_ )]
__SCREAMING_SNAKE_CASE : int = time.time()
max_subarray(snake_case_ , 0 , input_size - 1 )
__SCREAMING_SNAKE_CASE : Tuple = time.time()
return end - start
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
__SCREAMING_SNAKE_CASE : Any = [time_max_subarray(snake_case_ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(snake_case_ , snake_case_ ):
print(snake_case_ , '''\t\t''' , snake_case_ )
plt.plot(snake_case_ , snake_case_ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def lowercase__ ( snake_case_ :Any , snake_case_ :int ):
__UpperCAmelCase = BertAbsConfig(
        temp_dir='''.''' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ ,
        use_bert_emb=snake_case_ , encoder='''bert''' , max_pos=512 ,
        enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 ,
        dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2_048 , dec_dropout=0.2 , )
__UpperCAmelCase = torch.load(snake_case_ , lambda snake_case_ , snake_case_ : storage )
__UpperCAmelCase = AbsSummarizer(snake_case_ , torch.device('''cpu''' ) , snake_case_ )
original.eval()
__UpperCAmelCase = BertAbsSummarizer(snake_case_ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__UpperCAmelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__UpperCAmelCase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
__UpperCAmelCase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_ )) )
__UpperCAmelCase = torch.tensor(snake_case_ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__UpperCAmelCase = encoder_input_ids
__UpperCAmelCase = decoder_input_ids
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = __UpperCAmelCase = None
__UpperCAmelCase = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__UpperCAmelCase = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = original.generator(snake_case_ )
__UpperCAmelCase = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
__UpperCAmelCase = new_model.generator(snake_case_ )
__UpperCAmelCase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between model outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(snake_case_ ) )
__UpperCAmelCase = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('''all weights are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
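    # For reference, the matching load pattern for a state_dict checkpoint
    # (arguments illustrative, mirroring the construction above):
    #   model = BertAbsSummarizer(args, torch.device('cpu'))
    #   model.load_state_dict(torch.load(
    #       './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin'))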
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowercase : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class __a :
'''simple docstring'''
def __init__( self , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
if len(_lowercase ) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1." )
__lowercase = list(_lowercase )
__lowercase = degree
def __add__( self , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
if self.degree > polynomial_a.degree:
__lowercase = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _lowercase )
else:
__lowercase = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _lowercase )
def __sub__( self , _lowerCamelCase ) -> Any:
'''simple docstring'''
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self ) -> Any:
'''simple docstring'''
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
__lowercase = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _lowercase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = ""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_lowercase )
return polynomial
def __repr__( self ) -> Optional[int]:
'''simple docstring'''
return self.__str__()
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = [0] * self.degree
for i in range(self.degree ):
__lowercase = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _lowercase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase = 0 ) -> List[str]:
'''simple docstring'''
__lowercase = [0] * (self.degree + 2)
__lowercase = constant
for i in range(self.degree + 1 ):
__lowercase = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _lowercase )
def __eq__( self , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
return not self.__eq__(_lowercase )
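# A brief usage sketch (evaluate/derivative/integral are illustrative names for the
# three computation methods above):
#   p = Polynomial(2, [1, 2, 3])   # coefficients in ascending degree: 3x^2 + 2x + 1
#   p.evaluate(2)                  # -> 17  (3*4 + 2*2 + 1)
#   p.derivative()                 # -> 6x + 2
#   p.integral()                   # -> 1.0x^3 + 1.0x^2 + 1.0x  (integration constant 0)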
| 118 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : Dict ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
def a ( self : Any ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def a ( self : Optional[int] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
__UpperCAmelCase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
__UpperCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = '''A red cat sitting on a park bench'''
__UpperCAmelCase = np.random.RandomState(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__UpperCAmelCase = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 49 | 0 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ : str =DebertaTokenizer
a_ : Any =True
a_ : Union[str, Any] =DebertaTokenizerFast
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_snake_case : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
_snake_case : Union[str, Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_snake_case : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_snake_case : Dict = {'unk_token': '[UNK]'}
_snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowercase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowercase ) )
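        # With this toy vocabulary, "lower" tokenizes to ["l", "o", "w", "er"]: only the
        # "e r" -> "er" merge applies, and "\u0120" marks a word-initial space in
        # GPT-2-style byte-level BPE (exercised by the assertions further down).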
def UpperCamelCase_ ( self : Tuple , **UpperCamelCase : int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def UpperCamelCase_ ( self : Union[str, Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
_snake_case : List[Any] = 'lower newer'
_snake_case : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer()
_snake_case : Optional[Any] = 'lower newer'
_snake_case : List[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_snake_case : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
_snake_case : Tuple = tokens + [tokenizer.unk_token]
_snake_case : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Dict = self.get_tokenizer()
_snake_case : Dict = tokenizer('Hello' , 'World' )
_snake_case : Dict = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , _lowercase )
@slow
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : str = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
_snake_case : int = tokenizer.encode('sequence builders' , add_special_tokens=_lowercase )
_snake_case : Any = tokenizer.encode('multi-sequence build' , add_special_tokens=_lowercase )
_snake_case : Tuple = tokenizer.encode(
'sequence builders' , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
_snake_case : Tuple = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
_snake_case : int = tokenizer.build_inputs_with_special_tokens(_lowercase )
_snake_case : Dict = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
_snake_case : str = tokenizer_class.from_pretrained('microsoft/deberta-base' )
_snake_case : Optional[Any] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
_snake_case : Optional[int] = tokenizer(_lowercase , padding=_lowercase )
_snake_case : Dict = [tokenizer.decode(_lowercase , skip_special_tokens=_lowercase ) for seq in encoding['input_ids']]
# fmt: off
_snake_case : Optional[int] = {
'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
_snake_case : List[Any] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , _lowercase )
for expected, decoded in zip(_lowercase , _lowercase ):
self.assertEqual(_lowercase , _lowercase )
| 411 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any , snake_case_ :Optional[Any] ):
if split:
__UpperCAmelCase = {split: jsonl_path}
else:
__UpperCAmelCase = '''train'''
__UpperCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path}
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowercase__ ( snake_case_ :Optional[int] ):
return json.load(snake_case_ )
def lowercase__ ( snake_case_ :Any ):
return [json.loads(snake_case_ ) for line in buffer]
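# For reference: with lines=True the writer emits JSON Lines (one object per line),
# parsed above by load_json_lines; with lines=False it emits a single JSON document
# whose shape follows `orient` (records/split/index/columns/values/table), as the
# parametrized cases below exercise.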
class _UpperCAmelCase :
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_with_invalid_num_proc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 49 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
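
# Hedged usage sketch, not part of the original file ("harmonai/maestro-150k" is
# one publicly released Dance Diffusion checkpoint; substitute any compatible one):
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(batch_size=1, num_inference_steps=100)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)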
| 691 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 49 | 0 |
def find_minimum_change(denominations, value):
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
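
# Worked example (greedy, largest denomination first):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987)
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]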
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 57 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
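
# Worked example: for water (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa),
# speed_of_sound_in_a_fluid(998, 2.15e9) ~= 1467 m/s, close to the measured value.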
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
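
# Example: in the non-increasing row [4, 2, 0, -1, -3], the first negative value
# sits at index 3, so find_negative_index([4, 2, 0, -1, -3]) returns 3.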
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 654 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
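
# Examples: a back edge closes a cycle, a plain chain does not.
#   check_cycle({0: [1], 1: [2], 2: [0]})  -> True
#   check_cycle({0: [1], 1: [2], 2: []})   -> False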
if __name__ == "__main__":
from doctest import testmod
testmod()
| 49 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
return name
raise RuntimeError(
"""No hyperparameter search backend available.\n"""
+ """\n""".join(
f""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
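
# Hedged usage sketch, not part of the original file (goes through the Trainer
# API, which dispatches to the backends defined above):
#   best_run = trainer.hyperparameter_search(
#       direction="minimize",
#       backend=default_hp_search_backend(),  # e.g. "optuna" if it is installed
#       n_trials=10,
#   )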
| 519 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
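
# Hedged usage sketch of what the lazy module enables ("sail/poolformer_s12" is a
# real public checkpoint, but illustrative here):
#   from transformers import PoolFormerImageProcessor, PoolFormerForImageClassification
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")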
| 49 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None,
                 do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 259 |
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
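
# For the adjacency list above, compute_ap prints the articulation points 2, 3
# and 5: removing 2 splits off {0, 1}, removing 3 isolates 4, and removing 5
# disconnects the cycle's remainder {6, 7, 8} from the rest of the graph.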
| 49 | 0 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
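
# Hedged usage sketch (interactive terminal only; prompt and choices illustrative):
#   choice_index = BulletMenu("Which mode?", ["no", "fp16", "bf16"]).run(default_choice=0)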
| 125 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
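
# Hedged usage sketch, not part of the original file ("facebook/musicgen-small" is
# a real checkpoint; `generated_values` stands in for the model's audio output):
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop with heavy drums"], padding=True, return_tensors="pt")
#   audios = processor.batch_decode(audio=generated_values, padding_mask=inputs["padding_mask"])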
| 49 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
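
# Sanity check: the positive root of 10 - x**2 is sqrt(10) ~ 3.1623, so
# bisection(0, 6) converges to that value within the 0.01 interval tolerance.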
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 401 |
"""simple docstring"""
def lowercase__ ( snake_case_ :str , snake_case_ :str ):
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = len(snake_case_ )
__UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
__UpperCAmelCase = True
for i in range(snake_case_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
__UpperCAmelCase = True
if a[i].islower():
__UpperCAmelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key):
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT):
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 142 )
skip_list.insert("Key2" , 15 )
skip_list.delete("X" )
    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
skip_list.insert(2 , "2" )
skip_list.insert(4 , "4" )
skip_list.insert(6 , "4" )
skip_list.insert(4 , "5" )
skip_list.insert(8 , "4" )
skip_list.insert(9 , "4" )
skip_list.delete(4 )
    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
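
# Note: with p = 0.5 the expected cost of find/insert/delete is O(log n) with
# O(n) space, and max_level = 16 comfortably covers lists of up to ~2**16 elements.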
| 16 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={'queue': deque([P1, P2, P3, P4])})

    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
    \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
    \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
    \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
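
# Reading the output above: each process first receives up to 17 time units in
# queue 0, then up to 25 more in queue 1; any burst time still remaining is
# finished first-come-first-served in the last of the three queues.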
| 49 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 696 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
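
# Hedged usage sketch:
#   config = CamembertConfig()  # camembert-base sized defaults
#   config = CamembertConfig.from_pretrained("camembert-base")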
| 49 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, pad_token_id: int = 1, bos_token_id: int = 0, eos_token_id: int = 2, vocab_size: int = 30_522, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3_072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 512, type_vocab_size: int = 2, initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, onnx_export: bool = False, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(self, preprocessor: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
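# Illustrative sketch (added; not part of the original module): the dummy-input
# helper above marks every second token as global. The same pattern stand-alone:
#
#     import torch
#
#     input_ids = torch.ones(2, 8, dtype=torch.long)
#     global_attention_mask = torch.zeros_like(input_ids)
#     global_attention_mask[:, ::2] = 1  # tokens 0, 2, 4, 6 attend globally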
| 118 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
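# Worked trace (added for illustration): rec_insertion_sort([3, 1, 2], 3)
#   insert_next(a, 2): 1 <= 2, already ordered          -> [3, 1, 2]
#   rec_insertion_sort(a, 2): insert_next(a, 1) swaps   -> [1, 3, 2]
#     which recurses into insert_next(a, 2) and swaps   -> [1, 2, 3]
#   rec_insertion_sort(a, 1): base case, recursion ends.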
| 49 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings', set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub', HfhMock())


@pytest.mark.parametrize(
    'func, args', [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='https://huggingface.co/docs/evaluate'):
        func(*args)
| 411 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : Any = StableUnCLIPPipeline
a__ : Dict = TEXT_TO_IMAGE_PARAMS
a__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
a__ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ : Optional[int] = False
def a ( self : List[str] ):
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=_lowercase , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=_lowercase , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=10_00 , clip_sample=_lowercase , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_lowercase )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
        __UpperCAmelCase = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_lowercase , layers_per_block=1 , upcast_attention=_lowercase , use_linear_projection=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=_lowercase , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def a ( self : str , _lowercase : Dict , _lowercase : List[str]=0 ):
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Any ):
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=_lowercase )
def a ( self : int ):
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=_lowercase )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : Any ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=_lowercase , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
def a ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        __UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 49 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _lowercase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowercase , _lowercase )
def __UpperCamelCase ( self : Optional[int] , **_a : List[str] ) -> Optional[int]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def __UpperCamelCase ( self : Optional[int] , **_a : List[str] ) -> Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowercase )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowercase )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_lowercase , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =processor(text=_lowercase )
_SCREAMING_SNAKE_CASE =tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_lowercase ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_lowercase )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =VisionTextDualEncoderProcessor(tokenizer=_lowercase , image_processor=_lowercase )
_SCREAMING_SNAKE_CASE ='''lower newer'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 691 |
"""simple docstring"""
from typing import Any
def viterbi(observations_space: list, states_space: list, initial_probabilities: dict, transition_probabilities: dict, emission_probabilities: dict) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}

    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
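# Usage sketch (added for illustration): a toy hidden Markov model; the
# probability values below are assumptions, not part of the original file.
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    initial = {"healthy": 0.6, "sick": 0.4}
    transitions = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emissions = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    # expected most likely hidden path: ['healthy', 'healthy', 'sick']
    print(viterbi(observations, states, initial, transitions, emissions))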
| 49 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A_ : Union[str, Any] = logging.getLogger(__name__)
A_ : Optional[Any] = 'Hello world! cécé herlolip'
A_ : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path) -> None:
UpperCamelCase_: int = BertAbsConfig(
temp_dir='.' , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder='bert' , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
    UpperCamelCase_: Union[str, Any] = torch.load(path_to_checkpoints , lambda storage , loc : storage )
UpperCamelCase_: Tuple = AbsSummarizer(snake_case_ , torch.device('cpu' ) , snake_case_ )
original.eval()
UpperCamelCase_: Optional[Any] = BertAbsSummarizer(snake_case_ , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode('This is sample éàalj\'-.' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode('This is sample 3 éàalj\'-.' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCamelCase_: str = encoder_input_ids
UpperCamelCase_: Dict = decoder_input_ids
UpperCamelCase_: List[str] = None
UpperCamelCase_: Tuple = None
UpperCamelCase_: Optional[int] = None
UpperCamelCase_: int = None
UpperCamelCase_: List[Any] = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCamelCase_: int = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
UpperCamelCase_: Tuple = original.generator(snake_case_ )
UpperCamelCase_: int = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )[0]
UpperCamelCase_: str = new_model.generator(snake_case_ )
UpperCamelCase_: Union[str, Any] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between weights: {:.2f}'.format(snake_case_ ) )
UpperCamelCase_: str = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between weights: {:.2f}'.format(snake_case_ ) )
UpperCamelCase_: Union[str, Any] = torch.allclose(snake_case_ , snake_case_ , atol=1E-3 )
if are_identical:
logging.info('all weights are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
A_ : List[str] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
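# Example invocation (added for illustration; the script name and checkpoint
# path are hypothetical; the output folder name matches the save path above):
#
#     python convert_bertabs_original_pytorch_checkpoint.py \
#         --bertabs_checkpoint_path ./bertabs_original_checkpoint.pt \
#         --pytorch_dump_folder_path ./bertabs-finetuned-cnndm-extractive-abstractive-summarization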
| 57 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase : int = logging.get_logger(__name__)
_lowercase : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_lowercase : int = {
'yjernite/retribert-base-uncased': 5_12,
}
_lowercase : Any = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_INIT_CONFIGURATION
a__ : Optional[Any] = RetriBertTokenizer
a__ : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowercase : str=None , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Optional[Any]="[UNK]" , _lowercase : int="[SEP]" , _lowercase : List[str]="[PAD]" , _lowercase : Union[str, Any]="[CLS]" , _lowercase : Any="[MASK]" , _lowercase : Optional[Any]=True , _lowercase : List[Any]=None , **_lowercase : str , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
__UpperCAmelCase = getattr(_lowercase , normalizer_state.pop('''type''' ) )
__UpperCAmelCase = do_lower_case
__UpperCAmelCase = strip_accents
__UpperCAmelCase = tokenize_chinese_chars
__UpperCAmelCase = normalizer_class(**_lowercase )
__UpperCAmelCase = do_lower_case
def a ( self : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any]=None ):
__UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a ( self : List[str] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
__UpperCAmelCase = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
| 49 | 0 |
def add(first: int, second: int) -> int:
    # Repeated carry propagation: AND extracts the carry bits, XOR adds
    # the bits without carrying, and the loop ends once no carry remains.
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
    print(F'''{add(first, second) = }''')
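# Worked trace (added for illustration): add(5, 3)
#   c = 5 & 3 = 1; first = 5 ^ 3 = 6; second = 1 << 1 = 2
#   c = 6 & 2 = 2; first = 6 ^ 2 = 4; second = 2 << 1 = 4
#   c = 4 & 4 = 4; first = 4 ^ 4 = 0; second = 4 << 1 = 8
#   c = 0 & 8 = 0; first = 0 ^ 8 = 8; second = 0  ->  returns 8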
| 654 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
_lowercase : Dict = 'bart'
_lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__UpperCAmelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__UpperCAmelCase = qar_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
__UpperCAmelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        __UpperCAmelCase = AutoModelForSeq2SeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__UpperCAmelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__UpperCAmelCase = sas_model.eval()
else:
__UpperCAmelCase , __UpperCAmelCase = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
if LOAD_DENSE_INDEX:
__UpperCAmelCase = faiss.StandardGpuResources()
__UpperCAmelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__UpperCAmelCase = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
__UpperCAmelCase = faiss.index_cpu_to_gpu(snake_case_ , 1 , snake_case_ )
wikiaab_gpu_index_flat.add(snake_case_ ) # TODO fix for larger GPU
else:
__UpperCAmelCase , __UpperCAmelCase = (None, None)
__UpperCAmelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case_ )
def lowercase__ ( ):
__UpperCAmelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__UpperCAmelCase = elia['''train_eli5''']
__UpperCAmelCase = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__UpperCAmelCase = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case_ )
return (elia_train, eli5_train_q_index)
_lowercase ,_lowercase ,_lowercase : Dict = load_indexes()
_lowercase ,_lowercase ,_lowercase ,_lowercase : Dict = load_models()
_lowercase ,_lowercase : Tuple = load_train_data()
def lowercase__ ( snake_case_ :Tuple , snake_case_ :Any=10 ):
__UpperCAmelCase = embed_questions_for_retrieval([question] , snake_case_ , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = eli5_train_q_index.search(snake_case_ , snake_case_ )
__UpperCAmelCase = [elia_train[int(snake_case_ )] for i in I[0]]
return nn_examples
def lowercase__ ( snake_case_ :Any , snake_case_ :Dict="wiki40b" , snake_case_ :str="dense" , snake_case_ :Union[str, Any]=10 ):
if source == "none":
__UpperCAmelCase , __UpperCAmelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__UpperCAmelCase , __UpperCAmelCase = query_qa_dense_index(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
else:
__UpperCAmelCase , __UpperCAmelCase = query_es_index(
snake_case_ , snake_case_ , index_name='''english_wiki40b_snippets_100w''' , n_results=snake_case_ , )
__UpperCAmelCase = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__UpperCAmelCase = '''question: {} context: {}'''.format(snake_case_ , snake_case_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :Optional[Any] , snake_case_ :str , snake_case_ :List[Any]=64 , snake_case_ :Optional[int]=256 , snake_case_ :List[Any]=False , snake_case_ :Optional[Any]=2 , snake_case_ :Optional[Any]=0.95 , snake_case_ :List[Any]=0.8 ):
with torch.no_grad():
__UpperCAmelCase = qa_sas_generate(
snake_case_ , snake_case_ , snake_case_ , num_answers=1 , num_beams=snake_case_ , min_len=snake_case_ , max_len=snake_case_ , do_sample=snake_case_ , temp=snake_case_ , top_p=snake_case_ , top_k=snake_case_ , max_input_length=1_024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
_lowercase : Dict = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
_lowercase : Optional[Any] = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowercase : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowercase : str = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
_lowercase : Optional[int] = st.sidebar.checkbox('Demo options')
if demo_options:
_lowercase : Tuple = st.sidebar.selectbox(
'',
action_list,
index=3,
)
_lowercase : List[str] = action_list.index(action_st)
_lowercase : str = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
_lowercase : int = show_type == 'Show full text of passages'
else:
_lowercase : str = 3
_lowercase : List[Any] = True
_lowercase : Optional[int] = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
_lowercase : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
_lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
_lowercase : List[str] = 'wiki40b'
_lowercase : Optional[int] = 'dense'
_lowercase : List[Any] = 'beam'
_lowercase : str = 2
_lowercase : Optional[int] = 64
_lowercase : Union[str, Any] = 2_56
_lowercase : List[str] = None
_lowercase : Optional[int] = None
_lowercase : Union[str, Any] = st.sidebar.checkbox('Generation options')
if generate_options:
_lowercase : Tuple = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
_lowercase : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
_lowercase : Optional[int] = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
_lowercase : Optional[Any] = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_lowercase : str = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowercase : List[Any] = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowercase : Dict = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowercase : Union[str, Any] = None
# start main text
_lowercase : Optional[int] = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
_lowercase : Optional[int] = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowercase : Optional[Any] = st.text_input('Enter your question here:', '')
else:
_lowercase : int = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowercase ,_lowercase : Any = make_support(question, source=wiki_source, method='dense', n_results=10)
_lowercase ,_lowercase : Union[str, Any] = make_support(question, source=wiki_source, method='sparse', n_results=10)
_lowercase : Dict = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowercase : Any = support_list[:10]
_lowercase : Tuple = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
_lowercase ,_lowercase : List[str] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowercase ,_lowercase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
_lowercase : int = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
_lowercase : Any = res[1].strip()
if sec_titles == "":
_lowercase : Dict = '[{}]({})'.format(res[0], wiki_url)
else:
_lowercase : List[Any] = sec_titles.split(' & ')
_lowercase : int = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
_lowercase : List[Any] = find_nearest_training(question)
_lowercase : Tuple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
_lowercase : int = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
_lowercase : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(1_0, 1_0)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"""Accelerated optimizer pickling failed with {e}""")
        AcceleratorState._reset_state()
| 519 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[str] = CycleDiffusionPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"negative_prompt",
"height",
"width",
"negative_prompt_embeds",
}
a__ : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
a__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} )
a__ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a ( self : Optional[int] ):
torch.manual_seed(0 )
        __UpperCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
__UpperCAmelCase = CLIPTextModel(_lowercase )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a ( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=0 ):
__UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image / 2 + 0.5
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a ( self : Optional[int] ):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.get_dummy_components()
for name, module in components.items():
if hasattr(_lowercase , '''half''' ):
__UpperCAmelCase = module.half()
__UpperCAmelCase = CycleDiffusionPipeline(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = self.get_dummy_inputs(_lowercase )
__UpperCAmelCase = pipe(**_lowercase )
__UpperCAmelCase = output.images
__UpperCAmelCase = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__UpperCAmelCase = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a ( self : Tuple ):
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a ( self : List[str] ):
return super().test_inference_batch_single_identical()
@skip_mps
def a ( self : int ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self : int ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(
            _lowercase , scheduler=_lowercase , safety_checker=_lowercase , torch_dtype=torch.float16 , revision='''fp16''' )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a ( self : Optional[Any] ):
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__UpperCAmelCase = init_image.resize((5_12, 5_12) )
__UpperCAmelCase = '''CompVis/stable-diffusion-v1-4'''
__UpperCAmelCase = DDIMScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
__UpperCAmelCase = CycleDiffusionPipeline.from_pretrained(_lowercase , scheduler=_lowercase , safety_checker=_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
__UpperCAmelCase = '''A black colored car'''
__UpperCAmelCase = '''A blue colored car'''
__UpperCAmelCase = torch.manual_seed(0 )
__UpperCAmelCase = pipe(
prompt=_lowercase , source_prompt=_lowercase , image=_lowercase , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=_lowercase , output_type='''np''' , )
__UpperCAmelCase = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 49 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = cn.convert_to_negative(snake_case_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCamelCase__ ( ):
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(snake_case_ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCAmelCase : str = canny.canny(snake_case_ )
# assert canny array for at least one True
assert canny_array.any()
def lowerCamelCase__ ( ):
'''simple docstring'''
assert gg.gaussian_filter(snake_case_ , 5 , sigma=0.9 ).all()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_lowerCAmelCase : Any = conv.img_convolve(snake_case_ , snake_case_ ).astype(snake_case_ )
assert res.any()
def lowerCamelCase__ ( ):
'''simple docstring'''
assert med.median_filter(snake_case_ , 3 ).any()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase, _lowerCAmelCase : Union[str, Any] = sob.sobel_filter(snake_case_ )
assert grad.any() and theta.any()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = sp.make_sepia(snake_case_ , 20 )
assert sepia.all()
def lowerCamelCase__ ( _lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = bs.Burkes(imread(snake_case_ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCamelCase__ ( _lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = rs.NearestNeighbour(imread(snake_case_ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : int = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
_lowerCAmelCase : Any = imread(snake_case_ , 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Any = image[x_coordinate][y_coordinate]
_lowerCAmelCase : Optional[int] = lbp.get_neighbors_pixel(
snake_case_ , snake_case_ , snake_case_ , snake_case_ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCAmelCase : Dict = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_lowerCAmelCase : Optional[Any] = lbp.local_binary_value(snake_case_ , snake_case_ , snake_case_ )
assert lbp_image.any()
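
# Illustrative sketch, not part of the original tests: what a local-binary-
# pattern helper like lbp.local_binary_value computes for one pixel. The
# clockwise neighbour ordering is an assumption, not necessarily the one
# used by the digital_image_processing package.
def _illustrative_local_binary_value(image, x, y):
    center = image[x][y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1]:
            value |= int(image[nx][ny] >= center) << bit
    return value


assert _illustrative_local_binary_value(array([[5, 9], [1, 5]], dtype=uint8), 0, 0) == 24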
| 259 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 2_56,
}


class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
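
# Illustrative sketch, not part of the original module: the special-token
# layout produced by build_inputs_with_special_tokens above, shown with
# made-up ids (101/102 are placeholders, not RemBERT's real ids).
if __name__ == "__main__":
    _cls, _sep = 101, 102
    _ids_a, _ids_b = [7, 8, 9], [4, 5]
    assert [_cls] + _ids_a + [_sep] == [101, 7, 8, 9, 102]
    assert [_cls] + _ids_a + [_sep] + _ids_b + [_sep] == [101, 7, 8, 9, 102, 4, 5, 102]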
| 49 | 0 |
"""Project Euler problem 2: sum of the even-valued Fibonacci terms that do not exceed the limit."""


def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even Fibonacci numbers not exceeding n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
| 125 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
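
# Illustrative sketch, not part of the original module: _LazyModule defers
# the heavy imports above until an attribute is first accessed. A stripped-
# down, hypothetical version of the same idea:
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported class name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # only called for attributes not yet present: import on demand
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)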
| 49 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
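
# Illustrative sketch, not part of the original tests: the shared boolean
# mask built for pre-training above, as a stand-alone construction
# (torch availability and the toy sizes here are assumptions).
import torch as _torch_sketch

_seq_length, _num_masks, _batch_size = 8, 5, 2
_mask = _torch_sketch.cat([_torch_sketch.ones(_num_masks), _torch_sketch.zeros(_seq_length - _num_masks)])
_bool_masked_pos = _mask.expand(_batch_size, -1).bool()
assert _bool_masked_pos.shape == (_batch_size, _seq_length)
assert int(_bool_masked_pos.sum()) == _batch_size * _num_masks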
| 401 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4_000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
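
# Illustrative sketch, not part of the original script: the core soft-target
# term weighted by --alpha_ce above is a temperature-scaled KL divergence.
# Minimal stand-alone version (the 2.0 default mirrors the --temperature
# flag; everything else here is an assumption, not the Distiller's code).
import torch.nn.functional as F


def soft_target_loss(student_logits, teacher_logits, temperature=2.0):
    # Hinton-style distillation: KL between softened distributions,
    # rescaled by T**2 so gradients keep a comparable magnitude.
    return F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature**2)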
if __name__ == "__main__":
main()
| 49 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clap'] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure['feature_extraction_clap'] = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 49 | 0 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
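
# Illustrative sketch, not part of the original module: the boolean-mask
# slicing used by _decode_audio above, on made-up values.
_wave = np.arange(6).reshape(1, 6)      # one channel, six samples
_keep = np.array([1, 1, 1, 1, 0, 0])    # 1 = real sample, 0 = padding
_trimmed = _wave[:, _keep.astype(bool)]
assert _trimmed.tolist() == [[0, 1, 2, 3]]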
| 696 |
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowercase : Union[str, Any] = logging.getLogger(__name__)
_lowercase : Optional[Any] = 'Hello world! cécé herlolip'
_lowercase : str = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs into our internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2_048, dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 49 | 0 |
"""simple docstring"""
def lowerCAmelCase__ ( __magic_name__ = "The quick brown fox jumps over the lazy dog" , ) ->Union[str, Any]:
__lowercase = set()
# Replace all the whitespace in our sentence
__lowercase = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(snake_case_ ) == 2_6
def lowerCAmelCase__ ( __magic_name__ = "The quick brown fox jumps over the lazy dog" , ) ->int:
__lowercase = [False] * 2_6
for char in input_str:
if char.islower():
__lowercase = True
elif char.isupper():
__lowercase = True
return all(snake_case_ )
def lowerCAmelCase__ ( __magic_name__ = "The quick brown fox jumps over the lazy dog" , ) ->Union[str, Any]:
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def lowerCAmelCase__ ( ) ->Optional[Any]:
from timeit import timeit
__lowercase = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=snake_case_ ) )
print(timeit("is_pangram_faster()" , setup=snake_case_ ) )
print(timeit("is_pangram_fastest()" , setup=snake_case_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
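
# Illustrative sketch, not part of the original module: the same check can
# be phrased as a subset test against the alphabet (ASCII-only, as above).
import string


def is_pangram_set_difference(sentence: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return set(string.ascii_lowercase) <= set(sentence.lower())


assert is_pangram_set_difference()
assert not is_pangram_set_difference("hello world")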
| 118 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default_ddim(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
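
# Illustrative sketch, not part of the original tests: the slice-based
# comparison pattern used above, on an all-zero stand-in image.
_image = np.zeros((1, 512, 512, 3))
_image_slice = _image[0, 255:258, 255:258, -1]
assert _image_slice.shape == (3, 3)
assert np.abs(_image_slice.flatten() - np.zeros(9)).max() < 1e-3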
| 49 | 0 |
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 411 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowercase__ ( snake_case_ :Dict , snake_case_ :int ):
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :str , snake_case_ :Dict , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :List[str] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowercase__ ( snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
__UpperCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
__UpperCAmelCase = features.copy()
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = JsonDatasetReader(snake_case_ , features=snake_case_ , cache_dir=snake_case_ ).read()
assert isinstance(snake_case_ , snake_case_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[Any] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ , split=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Dict ):
if issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = jsonl_path
elif issubclass(snake_case_ , snake_case_ ):
__UpperCAmelCase = [jsonl_path]
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = JsonDatasetReader(snake_case_ , cache_dir=snake_case_ ).read()
_check_json_dataset(snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :str , snake_case_ :int=("train",) ):
assert isinstance(snake_case_ , snake_case_ )
for split in splits:
__UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowercase__ ( snake_case_ :Tuple , snake_case_ :List[Any] , snake_case_ :Optional[Any] ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=snake_case_ , keep_in_memory=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowercase__ ( snake_case_ :List[str] , snake_case_ :List[str] , snake_case_ :int ):
__UpperCAmelCase = tmp_path / '''cache'''
__UpperCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
__UpperCAmelCase = features.copy() if features else default_expected_features
__UpperCAmelCase = (
Features({feature: Value(snake_case_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCAmelCase = JsonDatasetReader({'''train''': jsonl_path} , features=snake_case_ , cache_dir=snake_case_ ).read()
_check_json_datasetdict(snake_case_ , snake_case_ )
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 49 | 0 |
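# A minimal sketch of the JSON round trip that the tests above exercise, outside
# pytest, assuming a small in-memory Dataset. `JsonDatasetWriter` lives in
# `datasets.io.json`, matching the imports of the test module.
import io

from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
buffer = io.BytesIO()
JsonDatasetWriter(ds, buffer, lines=True).write()  # one JSON object per line
buffer.seek(0)
print(buffer.read().decode())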
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the distribution."""
        return self.variance.sqrt()
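# Usage sketch for the affine wrapper above (this check would normally live in a
# test): scaling a standard Normal by 2 and shifting by 3 yields mean 3, stddev 2.
_base = Normal(torch.tensor(0.0), torch.tensor(1.0))
_affine = AffineTransformed(_base, loc=3.0, scale=2.0)
assert float(_affine.mean) == 3.0 and float(_affine.stddev) == 2.0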
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution parameter, each with its own width.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        r"""Shape of each individual event produced by the distributions that this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        r"""Number of event dimensions, i.e. length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        r"""A value in the distribution's support, used when padding data series. 0.0 by default."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        r"""Return the projection layer that maps network features to the distribution's parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        r"""Convert arguments to the right shape and domain of the distribution's parameters."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        r"""Map inputs to the positive orthant via the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
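# Note on `squareplus`: (x + sqrt(x**2 + 4)) / 2 smoothly maps the whole real
# line onto (0, inf), which is why the concrete outputs below use it to keep
# scale and count parameters positive. For example (values approximate):
#     DistributionOutput.squareplus(torch.tensor([-10.0, 0.0, 10.0]))
#     # -> tensor([ 0.0990,  1.0000, 10.0990])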
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method: an affine transform would break the
    # integer support of the negative binomial, so we rescale the parameters
    # themselves instead of the samples.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 691 |
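# End-to-end usage sketch for the distribution-output classes above, assuming
# they are in scope (e.g. the same module or a REPL). The feature width of 32
# and batch size of 8 are made up for illustration: the projection maps network
# features to constrained parameters, and `distribution()` turns those into a
# torch distribution.
import torch

output = StudentTOutput(dim=1)
projection = output.get_parameter_projection(in_features=32)

features = torch.randn(8, 32)      # e.g. decoder output for a batch of 8
distr_args = projection(features)  # (df, loc, scale), each of shape (8,)
distr = output.distribution(distr_args)
nll = -distr.log_prob(distr.sample())  # shape (8,)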
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)
    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))
    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an `Empty` exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 49 | 0 |
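# The iterator-streamer pattern exercised above is the same one used to stream
# tokens into an application; a minimal sketch (the tiny test checkpoint is
# reused here purely for illustration).
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# generate() blocks, so it runs in a worker thread while this thread consumes text.
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()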
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """
    Return all primes up to and including num via the sieve of Eratosthenes.

    >>> prime_sieve(25)
    [2, 3, 5, 7, 11, 13, 17, 19, 23]
    """
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 57 |
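# Quick check of the sieve defined in the snippet above:
print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]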
"""simple docstring"""
def lowercase__ ( snake_case_ :float , snake_case_ :float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 | 0 |
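# Worked example for the formula above, with typical textbook values for water
# (bulk modulus ~2.15 GPa, density ~998 kg/m^3):
#     c = sqrt(2.15e9 / 998) ~ 1467.8 m/s
# versus roughly 343 m/s in air -- sound travels over four times faster in water.
print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))  # ~1467.76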