code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
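# A minimal standalone sketch of the slow integration path above; it assumes flax is
# installed and the Hub checkpoint is reachable, and is meant to be run directly.
if __name__ == "__main__":
    mlm = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
    logits = mlm(np.ones((1, 6), dtype="i4"))[0]
    print(logits.shape)  # (1, 6, 50000): one score per vocabulary entry and position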
| 26 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        """
        Returns the image with detected corners marked and the list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
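# A quick cross-check sketch: OpenCV ships a built-in Harris response (cv2.cornerHarris),
# which is a handy reference against the hand-written window loop above. The image path
# is the same placeholder used in the demo.
if __name__ == "__main__":
    gray = cv2.imread("path_to_image", 0).astype(np.float32)
    response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)
    print("max Harris response:", float(response.max()))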
| 26 | 1 |
from jiwer import compute_measures

import datasets


_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"

_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"

_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
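# A usage sketch for the metric above, assuming only `jiwer` is installed. It prints the
# raw alignment counts that the iterative branch accumulates.
if __name__ == "__main__":
    predictions = ["this is the prediction", "there is an other sample"]
    references = ["this is the reference", "there is another one"]
    measures = compute_measures(references, predictions)
    print(measures["wer"], measures["substitutions"], measures["deletions"], measures["insertions"])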
| 26 |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
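# A minimal usage sketch (assumes Pillow and the public `openai/clip-vit-base-patch32`
# checkpoint): one call tokenizes the text and preprocesses the image together.
if __name__ == "__main__":
    from PIL import Image

    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.new("RGB", (224, 224))
    batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
    print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']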
| 26 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (SOS) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
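# A small sketch of what `attribute_map` buys on the config above: the GPT-2 style
# fields stay canonical and the BERT-style names read through to them.
if __name__ == "__main__":
    config = ImageGPTConfig()
    print(config.n_embd, config.hidden_size)  # both 512
    print(config.n_positions, config.max_position_embeddings)  # both 1024
    print(config.vocab_size)  # 513 = 512 pixel clusters + 1 start-of-sentence token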
| 26 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class=\"img-container\"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
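# Running the app: this file is a Streamlit script, not an importable module. Assuming
# it is saved as eli5_app.py next to the eli5_utils helpers, it is launched with
#
#   streamlit run eli5_app.py
#
# and expects the precomputed artifacts referenced above (the wiki40b/ELI5 .dat memmaps
# and the seq2seq_models/ checkpoints) plus an Elasticsearch instance on localhost:9200.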
| 26 | 1 |
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 26 |
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 26 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}


class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
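# A minimal instantiation sketch: `visual_embedding_dim` is the one field VisualBERT
# adds on top of the usual BERT hyper-parameters (1024 below is just an example value).
if __name__ == "__main__":
    config = VisualBertConfig(visual_embedding_dim=1024)
    print(config.model_type, config.hidden_size, config.visual_embedding_dim)  # visual_bert 768 1024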
| 26 |
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
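# Example invocation, using the flags defined above (the script file name and the
# paths are placeholders):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json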
| 26 | 1 |
from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 26 |
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
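# Typical invocations, per the argparse flags above (the utils/release.py path is an
# assumption about where the script lives):
#
#   python utils/release.py                 # pre-release work: bump versions everywhere
#   python utils/release.py --patch         # pre-release work for a patch version
#   python utils/release.py --post_release  # move the branch back to a .dev0 version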
| 26 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
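# A small sketch of what the `_LazyModule` indirection buys: module attribute access,
# not the package import itself, is what pulls in the heavy tokenizer code.
if __name__ == "__main__":
    from transformers.models import nllb

    print(type(nllb).__name__)  # _LazyModule: tokenization modules not loaded yet
    print(nllb.NllbTokenizer.__name__)  # attribute access triggers the real import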
| 26 |
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
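# A standalone sketch of the behaviours pinned down above, runnable with just
# `datasets` installed:
if __name__ == "__main__":
    ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
    print(ds.column_names)  # ['col_1', 'col_2']
    uneven = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(uneven[1])  # {'col_1': None}: the first record fixes the schema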
| 26 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
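# A usage sketch for `deprecate` (the argument names below are hypothetical): pop a
# deprecated kwarg, emit the FutureWarning, and return its value so the caller can
# keep honouring it. Note the relative `from .. import __version__` above means this
# only works inside the diffusers package.
def resize(image, **kwargs):
    size = deprecate("legacy_size", "2.0.0", "Use `size` instead.", take_from=kwargs)
    return size if size is not None else image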
| 26 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNormad(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule to a different device
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will offload each submodule and set its execution device
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will offload each submodule and set its execution device
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
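        # A minimal sketch of the rule the hook tests above exercise, using the hook
        # classes defined earlier in this file:
        #
        #   add_hook_to_module(test_model, PostForwardHook())  # forward now returns output + 1
        #   add_hook_to_module(test_model, PostForwardHook())  # still output + 1: the hook was replaced
        #   add_hook_to_module(test_model, SequentialHook(PostForwardHook(), PostForwardHook()))
        #   # only now does forward return output + 2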
| 26 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
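# A brute-force cross-check of the closed-form answer above; a minimal sketch whose
# helper name is ours, not part of the original file.
def solution_bruteforce(n: int = 100) -> int:
    # Square of the sum minus the sum of the squares of 1..n, computed directly.
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert solution_bruteforce(10) == solution(10) == 2640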
| 26 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("""Algorithm is unable to find solution""")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
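# An aside on the open list: search() re-sorts `cell` before every expansion
# (sort + reverse + pop). A binary heap gives the same cheapest-first pop without
# full re-sorts; a minimal sketch with toy entries of our own:
import heapq

open_list = [[7, 0, 0, 0]]  # entries are [f, g, x, y]; heapq orders by f first
heapq.heappush(open_list, [5, 1, 1, 0])
assert heapq.heappop(open_list)[0] == 5  # pops the entry with the smallest f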
| 26 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _A ( __lowercase ):
lowercase__: torch.FloatTensor
lowercase__: torch.FloatTensor
class _A ( __lowercase , __lowercase ):
lowercase__: List[Any] = 1
@register_to_config
def __init__( self : Optional[Any] , __magic_name__ : int = 20_00 , __magic_name__ : float = 0.15 , __magic_name__ : float = 0.01 , __magic_name__ : float = 1348.0 , __magic_name__ : float = 1E-5 , __magic_name__ : int = 1 , ) -> int:
"""simple docstring"""
__snake_case : Dict = sigma_max
# setable values
__snake_case : Optional[Any] = None
self.set_sigmas(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def lowercase__ ( self : Tuple , __magic_name__ : torch.FloatTensor , __magic_name__ : Optional[int] = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : float = None , __magic_name__ : Union[str, torch.device] = None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__snake_case : List[str] = torch.linspace(1 , __magic_name__ , __magic_name__ , device=__magic_name__ )
def lowercase__ ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : float = None , __magic_name__ : float = None , __magic_name__ : float = None ) -> int:
"""simple docstring"""
__snake_case : Any = sigma_min if sigma_min is not None else self.config.sigma_min
__snake_case : Optional[Any] = sigma_max if sigma_max is not None else self.config.sigma_max
__snake_case : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__snake_case : Any = torch.exp(torch.linspace(math.log(__magic_name__ ) , math.log(__magic_name__ ) , __magic_name__ ) )
__snake_case : List[Any] = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Any , __magic_name__ : List[str] , __magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : int , __magic_name__ : torch.FloatTensor , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : bool = True , ) -> Union[SdeVeOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
__snake_case : List[str] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__snake_case : Dict = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be on the same device, so we use cpu, as is the default with cuda
__snake_case : int = timesteps.to(self.discrete_sigmas.device )
__snake_case : Dict = self.discrete_sigmas[timesteps].to(sample.device )
__snake_case : Union[str, Any] = self.get_adjacent_sigma(__magic_name__ , __magic_name__ ).to(sample.device )
__snake_case : List[str] = torch.zeros_like(__magic_name__ )
__snake_case : Union[str, Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__snake_case : int = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__snake_case : List[str] = diffusion.unsqueeze(-1 )
__snake_case : Optional[int] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__snake_case : Any = randn_tensor(
sample.shape , layout=sample.layout , generator=__magic_name__ , device=sample.device , dtype=sample.dtype )
__snake_case : Union[str, Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__snake_case : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__magic_name__ , prev_sample_mean=__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : torch.FloatTensor , __magic_name__ : torch.FloatTensor , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # For small batch sizes, the paper suggests "replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
__snake_case : str = randn_tensor(sample.shape , layout=sample.layout , generator=__magic_name__ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__snake_case : List[str] = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__snake_case : Optional[Any] = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__snake_case : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__snake_case : Union[str, Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__snake_case : int = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__snake_case : int = step_size.unsqueeze(-1 )
__snake_case : Any = sample + step_size * model_output
__snake_case : List[Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : torch.FloatTensor , __magic_name__ : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
__snake_case : Union[str, Any] = timesteps.to(original_samples.device )
__snake_case : Union[str, Any] = self.discrete_sigmas.to(original_samples.device )[timesteps]
__snake_case : Union[str, Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__magic_name__ ) * sigmas[:, None, None, None]
)
__snake_case : Union[str, Any] = noise + original_samples
return noisy_samples
def __len__( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.config.num_train_timesteps
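# A minimal sketch of the geometric noise schedule that set_sigmas builds above,
# with explicit argument names of our own choosing:
import math

import torch


def geometric_sigmas(sigma_min: float, sigma_max: float, num_steps: int) -> torch.Tensor:
    # Interpolate log-linearly between sigma_min and sigma_max and exponentiate;
    # equivalent to sigma_min * (sigma_max / sigma_min) ** t for t evenly spaced in [0, 1].
    return torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_steps))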
| 26 |
'''simple docstring'''
def _a(num: int) -> int:
    """simple docstring"""
    if not isinstance(num, int):
        raise TypeError("""only integers accepted as input""")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
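    # Usage sketch (the input value is our example): removing one digit from 386
    # yields 86, 36 and 38, so the maximum is 86.
    assert _a(386) == 86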
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """simple docstring"""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    """simple docstring"""
    n = int(input("""enter the numbers of values: """))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("""enter the values of parameters in a list: """)
    x = list(map(int, input().split()))
    print("""enter the values of corresponding parameters: """)
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("""enter the value to interpolate: """))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'''the value at {value} is {summ}''')
if __name__ == "__main__":
main()
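# A non-interactive variant of main() above for quick checks; a minimal sketch whose
# function name and sample data are ours.
def newton_forward(x: list[float], y_values: list[float], value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = y_values[i]
    # forward difference table, as in main()
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    summ = table[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * table[0][i]) / math.factorial(i)
    return summ


# On the line y = 2x + 1 the interpolation is exact: f(1.5) == 4.0
assert abs(newton_forward([0, 1, 2, 3], [1, 3, 5, 7], 1.5) - 4.0) < 1e-9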
| 26 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""")
    if not scores:
        raise ValueError("""Scores cannot be empty""")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores), 2)
    print(F'''Optimal value : {minimax(0, 0, True, scores, height)}''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
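    # A worked call on a tiny two-level tree (the values are our example): the
    # maximizer picks max(min(3, 5), min(2, 9)) == 3.
    assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3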
| 26 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Tuple = AutoencoderKL
lowercase__: int = '''sample'''
lowercase__: Dict = 1e-2
@property
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = 4
__snake_case : Optional[Any] = 3
__snake_case : Optional[Any] = (32, 32)
__snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
return {"sample": image}
@property
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
__snake_case : List[Any] = self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : List[Any] = self.prepare_init_args_and_inputs_for_common()
__snake_case : int = self.model_class(**__magic_name__ )
model.to(__magic_name__ )
assert not model.is_gradient_checkpointing and model.training
__snake_case : Any = model(**__magic_name__ ).sample
        # run the backwards pass on the model. For the backwards pass, for simplicity,
        # we don't compute a real loss and instead backprop on (out - labels).mean()
model.zero_grad()
__snake_case : List[str] = torch.randn_like(__magic_name__ )
__snake_case : Dict = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__snake_case : Optional[int] = self.model_class(**__magic_name__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__magic_name__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__snake_case : Dict = model_a(**__magic_name__ ).sample
        # run the backwards pass on the model. For the backwards pass, for simplicity,
        # we don't compute a real loss and instead backprop on (out_a - labels).mean()
model_a.zero_grad()
__snake_case : Optional[Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__snake_case : Any = dict(model.named_parameters() )
__snake_case : Any = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__magic_name__ )
__snake_case : Tuple = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
__snake_case : Optional[int] = model.to(__magic_name__ )
model.eval()
if torch_device == "mps":
__snake_case : Optional[Any] = torch.manual_seed(0 )
else:
__snake_case : Optional[int] = torch.Generator(device=__magic_name__ ).manual_seed(0 )
__snake_case : Optional[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case : Any = image.to(__magic_name__ )
with torch.no_grad():
__snake_case : Any = model(__magic_name__ , sample_posterior=__magic_name__ , generator=__magic_name__ ).sample
__snake_case : int = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case : Optional[int] = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
__snake_case : List[Any] = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__snake_case : str = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-2 ) )
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : str , __magic_name__ : Tuple , __magic_name__ : str ) -> Optional[Any]:
"""simple docstring"""
return f'''gaussian_noise_s={seed}_shape={"_".join([str(__magic_name__ ) for s in shape] )}.npy'''
def lowercase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Tuple , __magic_name__ : str=0 , __magic_name__ : Tuple=(4, 3, 5_12, 5_12) , __magic_name__ : Optional[int]=False ) -> Optional[Any]:
"""simple docstring"""
        __snake_case : int = torch.float16 if fpaa else torch.float32
__snake_case : str = torch.from_numpy(load_hf_numpy(self.get_file_format(__magic_name__ , __magic_name__ ) ) ).to(__magic_name__ ).to(__magic_name__ )
return image
def lowercase__ ( self : Dict , __magic_name__ : Any="CompVis/stable-diffusion-v1-4" , __magic_name__ : List[Any]=False ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = """fp16""" if fpaa else None
        __snake_case : Tuple = torch.float16 if fpaa else torch.float32
__snake_case : Dict = AutoencoderKL.from_pretrained(
__magic_name__ , subfolder="""vae""" , torch_dtype=__magic_name__ , revision=__magic_name__ , )
model.to(__magic_name__ ).eval()
return model
def lowercase__ ( self : Tuple , __magic_name__ : Tuple=0 ) -> int:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(__magic_name__ )
return torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def lowercase__ ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : List[str] = self.get_sd_vae_model()
__snake_case : int = self.get_sd_image(__magic_name__ )
__snake_case : Tuple = self.get_generator(__magic_name__ )
with torch.no_grad():
__snake_case : List[str] = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample
assert sample.shape == image.shape
__snake_case : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : str = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def lowercase__ ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = self.get_sd_vae_model(fpaa=__magic_name__ )
__snake_case : int = self.get_sd_image(__magic_name__ , fpaa=__magic_name__ )
__snake_case : Union[str, Any] = self.get_generator(__magic_name__ )
with torch.no_grad():
__snake_case : List[str] = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample
assert sample.shape == image.shape
__snake_case : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : int = torch.tensor(__magic_name__ )
assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : int = self.get_sd_vae_model()
__snake_case : List[Any] = self.get_sd_image(__magic_name__ )
with torch.no_grad():
__snake_case : str = model(__magic_name__ ).sample
assert sample.shape == image.shape
__snake_case : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = self.get_sd_vae_model()
__snake_case : Tuple = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : Dict = model.decode(__magic_name__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
__snake_case : int = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case : Optional[Any] = torch.tensor(__magic_name__ )
assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def lowercase__ ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.get_sd_vae_model(fpaa=__magic_name__ )
__snake_case : Optional[Any] = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ )
with torch.no_grad():
__snake_case : List[str] = model.decode(__magic_name__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
__snake_case : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : Optional[Any] = torch.tensor(__magic_name__ )
assert torch_all_close(__magic_name__ , __magic_name__ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def lowercase__ ( self : Any , __magic_name__ : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[str] = self.get_sd_vae_model(fpaa=__magic_name__ )
__snake_case : Optional[int] = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ )
with torch.no_grad():
__snake_case : Any = model.decode(__magic_name__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : int = model.decode(__magic_name__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def lowercase__ ( self : Dict , __magic_name__ : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = self.get_sd_vae_model()
__snake_case : Dict = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : List[str] = model.decode(__magic_name__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : List[Any] = model.decode(__magic_name__ ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : str ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = self.get_sd_vae_model()
__snake_case : Optional[int] = self.get_sd_image(__magic_name__ )
__snake_case : List[Any] = self.get_generator(__magic_name__ )
with torch.no_grad():
__snake_case : List[Any] = model.encode(__magic_name__ ).latent_dist
__snake_case : Optional[Any] = dist.sample(generator=__magic_name__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case : int = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case : Tuple = torch.tensor(__magic_name__ )
__snake_case : Any = 3E-3 if torch_device != """mps""" else 1E-2
assert torch_all_close(__magic_name__ , __magic_name__ , atol=__magic_name__ )
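# The slow tests above all share one pattern: flatten a small slice of the output and
# compare it against hard-coded reference values. A minimal sketch of that check, with
# a helper name and default tolerance of our own:
import torch


def assert_slice_close(actual: torch.Tensor, expected: torch.Tensor, atol: float = 3e-3) -> None:
    # Raise with the max absolute difference when the slices diverge.
    diff = (actual - expected).abs().max().item()
    assert torch.allclose(actual, expected, atol=atol), f"max abs diff: {diff}"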
| 26 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
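    # Usage sketch (the input list is our example): slowsort sorts in place.
    demo = [5, 3, 8, 1]
    slowsort(demo)
    assert demo == [1, 3, 5, 8]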
| 26 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = dataset
__snake_case : str = process
__snake_case : Union[str, Any] = params
def __len__( self : int ) -> Any:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Any , __magic_name__ : List[str] ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = self.dataset[i]
__snake_case : Dict = self.process(__magic_name__ , **self.params )
return processed
class _A ( __lowercase ):
def __init__( self : List[str] , __magic_name__ : Tuple , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Optional[int]=None ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = loader
__snake_case : List[Any] = infer
__snake_case : Tuple = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
__snake_case : Any = None
__snake_case : Tuple = loader_batch_size
# Internal bookkeeping
__snake_case : Union[str, Any] = None
__snake_case : Optional[Any] = None
def __len__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return len(self.loader )
def __iter__( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = iter(self.loader )
return self
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__snake_case : Optional[Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(__magic_name__ , __magic_name__ ):
# Convert ModelOutput to tuple first
__snake_case : int = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__snake_case : List[str] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__snake_case : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(__magic_name__ , __magic_name__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__snake_case : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__snake_case : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__snake_case : Tuple = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__snake_case : List[str] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__snake_case : int = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
__snake_case : Union[str, Any] = self._loader_batch_data.__class__(__magic_name__ )
self._loader_batch_index += 1
return result
def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__snake_case : List[Any] = next(self.iterator )
__snake_case : Union[str, Any] = self.infer(__magic_name__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(__magic_name__ , torch.Tensor ):
__snake_case : List[Any] = processed
else:
__snake_case : Optional[Any] = list(processed.keys() )[0]
__snake_case : Dict = processed[key]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : List[str] = len(__magic_name__ )
else:
__snake_case : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__snake_case : Optional[int] = observed_batch_size
# Setting internal index to unwrap the batch
__snake_case : Union[str, Any] = processed
__snake_case : Any = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _A ( __lowercase ):
def __init__( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : int=None ) -> Tuple:
"""simple docstring"""
super().__init__(__magic_name__ , __magic_name__ , __magic_name__ )
def __iter__( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Dict = iter(self.loader )
__snake_case : List[str] = None
return self
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
if self.subiterator is None:
__snake_case : Any = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
__snake_case : List[Any] = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__snake_case : List[Any] = self.infer(next(self.iterator ) , **self.params )
__snake_case : List[str] = next(self.subiterator )
return processed
class _A ( __lowercase ):
def __iter__( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = iter(self.loader )
return self
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Dict = False
__snake_case : Union[str, Any] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__snake_case : Union[str, Any] = self.loader_batch_item()
__snake_case : List[str] = item.pop("""is_last""" )
accumulator.append(__magic_name__ )
if is_last:
return accumulator
while not is_last:
__snake_case : Union[str, Any] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(__magic_name__ , torch.Tensor ):
__snake_case : str = processed
else:
__snake_case : Optional[Any] = list(processed.keys() )[0]
__snake_case : List[str] = processed[key]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = len(__magic_name__ )
else:
__snake_case : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__snake_case : List[str] = observed_batch_size
__snake_case : List[Any] = processed
__snake_case : str = 0
while self._loader_batch_index < self.loader_batch_size:
__snake_case : List[Any] = self.loader_batch_item()
__snake_case : List[Any] = item.pop("""is_last""" )
accumulator.append(__magic_name__ )
if is_last:
return accumulator
else:
__snake_case : Tuple = processed
__snake_case : List[Any] = item.pop("""is_last""" )
accumulator.append(__magic_name__ )
return accumulator
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : Dataset , __magic_name__ : str ) -> str:
"""simple docstring"""
__snake_case : List[Any] = dataset
__snake_case : Optional[int] = key
def __len__( self : str ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Optional[Any] , __magic_name__ : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.dataset[i][self.key]
class _A ( __lowercase ):
def __init__( self : Optional[Any] , __magic_name__ : Dataset , __magic_name__ : str , __magic_name__ : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = dataset
__snake_case : Any = keya
__snake_case : int = keya
def __len__( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Union[str, Any] , __magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 26 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
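# Usage sketch of the non-module branch in analyze_directory above (the file path is
# our placeholder):
#
#   import doctest
#   result = doctest.testfile("docs/source/quicktour.md", optionflags=doctest.ELLIPSIS)
#   assert result.failed == 0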
| 26 | 1 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__UpperCamelCase = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _A :
def __init__( self : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=16 , __magic_name__ : Optional[Any]=13 , __magic_name__ : List[str]=7 , __magic_name__ : Union[str, Any]=14 , __magic_name__ : Any=10 , __magic_name__ : Tuple=19 , __magic_name__ : int=5 , __magic_name__ : Any=4 , __magic_name__ : Tuple=True , __magic_name__ : int=16 , __magic_name__ : Any=2 , __magic_name__ : List[str]=4 , __magic_name__ : str=4 , __magic_name__ : Any="gelu" , __magic_name__ : int=0.1 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[int]=[1, 2, 3, 4, 5] , __magic_name__ : Union[str, Any]=25 , __magic_name__ : Union[str, Any]=5 , ) -> List[Any]:
"""simple docstring"""
__snake_case : Dict = d_model
__snake_case : int = parent
__snake_case : str = batch_size
__snake_case : Optional[Any] = prediction_length
__snake_case : str = context_length
__snake_case : str = cardinality
__snake_case : List[Any] = num_time_features
__snake_case : Union[str, Any] = lags_sequence
__snake_case : str = embedding_dimension
__snake_case : Optional[int] = is_training
__snake_case : Tuple = hidden_size
__snake_case : Any = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Union[str, Any] = intermediate_size
__snake_case : Any = hidden_act
__snake_case : str = hidden_dropout_prob
__snake_case : Tuple = attention_probs_dropout_prob
__snake_case : str = context_length
__snake_case : int = prediction_length + label_length
__snake_case : Optional[Any] = label_length
__snake_case : List[str] = moving_average
__snake_case : str = autocorrelation_factor
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : List[Any] = config.context_length + max(config.lags_sequence )
__snake_case : Any = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__snake_case : int = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__snake_case : List[str] = floats_tensor([self.batch_size, _past_length] )
__snake_case : List[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__snake_case : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__snake_case : Tuple = floats_tensor([self.batch_size, config.prediction_length] )
__snake_case : Union[str, Any] = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.get_config()
__snake_case : Any = self.prepare_autoformer_inputs_dict(__magic_name__ )
return config, inputs_dict
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case , __snake_case : str = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self : List[str] , __magic_name__ : int , __magic_name__ : List[str] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = AutoformerModel(config=__magic_name__ ).to(__magic_name__ ).eval()
__snake_case : Tuple = model(**__magic_name__ )
__snake_case : Optional[int] = outputs.encoder_last_hidden_state
__snake_case : Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[str] = model.get_encoder()
encoder.save_pretrained(__magic_name__ )
__snake_case : List[str] = AutoformerEncoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = model.create_network_inputs(**__magic_name__ )
__snake_case , __snake_case : List[str] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__snake_case : Union[str, Any] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__snake_case : Optional[int] = encoder(inputs_embeds=__magic_name__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
__snake_case : str = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__snake_case : Any = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__snake_case : Optional[Any] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__snake_case : Optional[int] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = model.get_decoder()
decoder.save_pretrained(__magic_name__ )
__snake_case : Dict = AutoformerDecoder.from_pretrained(__magic_name__ ).to(__magic_name__ )
__snake_case : str = decoder(
trend=__magic_name__ , inputs_embeds=__magic_name__ , encoder_hidden_states=__magic_name__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: Tuple = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowercase__: int = (AutoformerForPrediction,) if is_torch_available() else ()
lowercase__: int = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
lowercase__: int = False
lowercase__: Optional[int] = False
lowercase__: Dict = False
lowercase__: List[str] = False
lowercase__: int = False
lowercase__: List[str] = False
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = AutoformerModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__snake_case : int = model_class(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
__snake_case , __snake_case : str = model_class.from_pretrained(__magic_name__ , output_loading_info=__magic_name__ )
self.assertEqual(info["""missing_keys"""] , [] )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__magic_name__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = inspect.signature(getattr(__magic_name__ , """forward""" ) )
# The main input is the name of the argument after `self`
__snake_case : List[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __magic_name__ )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : int = model_class(__magic_name__ )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Tuple = [*signature.parameters.keys()]
__snake_case : Any = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__magic_name__ )] , __magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Any = True
__snake_case : Union[str, Any] = getattr(self.model_tester , """seq_length""" , __magic_name__ )
__snake_case : str = getattr(self.model_tester , """decoder_seq_length""" , __magic_name__ )
__snake_case : Tuple = getattr(self.model_tester , """encoder_seq_length""" , __magic_name__ )
__snake_case : Tuple = getattr(self.model_tester , """d_model""" , __magic_name__ )
__snake_case : str = getattr(self.model_tester , """num_attention_heads""" , __magic_name__ )
__snake_case : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
__snake_case : Optional[int] = True
__snake_case : List[Any] = False
__snake_case : Optional[Any] = True
__snake_case : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : str = True
__snake_case : Optional[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : List[Any] = outputs.encoder_attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__snake_case : Union[str, Any] = len(__magic_name__ )
__snake_case : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__magic_name__ , __magic_name__ )
# decoder attentions
__snake_case : Union[str, Any] = outputs.decoder_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__snake_case : str = outputs.cross_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__snake_case : str = True
__snake_case : int = True
__snake_case : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 2 , len(__magic_name__ ) )
__snake_case : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def _a ( _lowerCamelCase="train-batch.pt" ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCamelCase , repo_type="""dataset""" )
__snake_case : Union[str, Any] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
return batch
@require_torch
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : int = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__magic_name__ )
__snake_case : str = prepare_batch()
with torch.no_grad():
__snake_case : Any = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__snake_case : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __magic_name__ )
__snake_case : Any = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__magic_name__ )
__snake_case : Optional[Any] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__snake_case : Optional[int] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__snake_case : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __magic_name__ )
__snake_case : Tuple = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Any = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__magic_name__ )
__snake_case : Tuple = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__snake_case : Any = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__snake_case : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __magic_name__ )
__snake_case : int = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__magic_name__ )
__snake_case : List[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __magic_name__ , rtol=1E-1 ) )
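# A hedged usage sketch distilled from the integration tests above: probabilistic
# forecasts come from `generate`, and a point forecast is the mean over the
# sampled trajectories. The checkpoint id and batch fields mirror the tests;
# everything else is illustrative, not a confirmed recipe.
#
#   from transformers import AutoformerForPrediction
#
#   model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
#   outputs = model.generate(
#       past_values=batch["past_values"],
#       past_time_features=batch["past_time_features"],
#       past_observed_mask=batch["past_observed_mask"],
#       static_categorical_features=batch["static_categorical_features"],
#       future_time_features=batch["future_time_features"],
#   )
#   # outputs.sequences has shape (batch, num_parallel_samples, prediction_length)
#   point_forecast = outputs.sequences.mean(dim=1)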
| 26 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more efficient to move all timesteps to the correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored by other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
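# A hedged usage sketch for this speech-to-image pipeline. The `custom_pipeline`
# id and the model checkpoints below are assumptions based on how diffusers
# community pipelines are typically loaded; they are not confirmed by this file.
#
#   import torch
#   from diffusers import DiffusionPipeline
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",  # illustrative base checkpoint
#       custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   # `raw_audio` is a 1-D waveform array; the pipeline transcribes it with
#   # Whisper and uses the transcript as the Stable Diffusion prompt.
#   image = pipe(raw_audio, sampling_rate=16_000).images[0]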
| 26 | 1 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create the universe of discourse using np.linspace()
__UpperCamelCase = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__UpperCamelCase = [0, 25, 50]
__UpperCamelCase = [25, 50, 75]
__UpperCamelCase = fuzz.membership.trimf(X, abca)
__UpperCamelCase = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__UpperCamelCase = np.ones(75)
__UpperCamelCase = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
__UpperCamelCase = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
__UpperCamelCase = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
__UpperCamelCase = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
__UpperCamelCase = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
__UpperCamelCase = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
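# For intuition, a minimal NumPy-only sketch of the same set operations
# (assuming `young` and `middle_aged` are membership arrays on the shared
# universe X, as above). On a common universe the skfuzzy calls reduce to
# elementwise max/min arithmetic:
#
#   union_np = np.maximum(young, middle_aged)            # max(µA(x), µB(x))
#   intersection_np = np.minimum(young, middle_aged)     # min(µA(x), µB(x))
#   complement_np = 1 - young                            # 1 - µA(x)
#   bounded_sum_np = np.minimum(1, young + middle_aged)  # min(1, µA(x) + µB(x))
#   bounded_diff_np = np.maximum(0, young - middle_aged) # max(0, µA(x) - µB(x))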
| 26 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]: # noqa: E741
"""simple docstring"""
while r - l > 1:
__snake_case : str = (l + r) // 2
if v[m] >= key:
__snake_case : Any = m
else:
__snake_case : Union[str, Any] = m # noqa: E741
return r
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
if len(_lowerCamelCase ) == 0:
return 0
__snake_case : Optional[Any] = [0] * len(_lowerCamelCase )
__snake_case : Dict = 1
__snake_case : Any = v[0]
for i in range(1 , len(_lowerCamelCase ) ):
if v[i] < tail[0]:
__snake_case : List[str] = v[i]
elif v[i] > tail[length - 1]:
__snake_case : Any = v[i]
length += 1
else:
__snake_case : Optional[Any] = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
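# For comparison, a compact, hedged reference sketch of the same O(n log n)
# idea using only the standard library; `bisect_left` plays the role of the
# ceil-index binary search above. The function name is illustrative.
from bisect import bisect_left

def lis_length_reference(v):
    """Length of the longest strictly increasing subsequence of v."""
    # tails[k] holds the smallest possible tail of an increasing subsequence
    # of length k + 1 seen so far; it stays sorted throughout.
    tails = []
    for x in v:
        i = bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)  # x extends the longest subsequence found so far
        else:
            tails[i] = x  # x becomes a smaller tail for subsequences of length i + 1
    return len(tails)

# Expected: lis_length_reference([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6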
| 26 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
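# Example invocation (script filename and local paths are hypothetical; the
# model name matches the script's default):
#
#   python convert_mobilenet_v1_checkpoint.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf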
| 26 | 1 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _A ( enum.Enum ):
lowercase__: Any = 0
lowercase__: Optional[Any] = 1
lowercase__: List[Any] = 2
@add_end_docstrings(__lowercase )
class _A ( __lowercase ):
lowercase__: List[Any] = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self : int , *__magic_name__ : Tuple , **__magic_name__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().__init__(*__magic_name__ , **__magic_name__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__snake_case : Union[str, Any] = None
if self.model.config.prefix is not None:
__snake_case : Optional[Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__snake_case : Union[str, Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__snake_case , __snake_case , __snake_case : int = self._sanitize_parameters(prefix=__magic_name__ , **self._forward_params )
__snake_case : Optional[Any] = {**self._preprocess_params, **preprocess_params}
__snake_case : Dict = {**self._forward_params, **forward_params}
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Any=None , __magic_name__ : str=None , __magic_name__ : Dict=None , __magic_name__ : Dict=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : List[str] , ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = {}
if prefix is not None:
__snake_case : Tuple = prefix
if prefix:
__snake_case : List[str] = self.tokenizer(
__magic_name__ , padding=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=self.framework )
__snake_case : Optional[int] = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
""" [None, 'hole']""" )
__snake_case : Optional[Any] = handle_long_generation
preprocess_params.update(__magic_name__ )
__snake_case : Dict = generate_kwargs
__snake_case : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
__snake_case : Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
__snake_case : Union[str, Any] = ReturnType.TENSORS
if return_type is not None:
__snake_case : Dict = return_type
if clean_up_tokenization_spaces is not None:
__snake_case : Any = clean_up_tokenization_spaces
if stop_sequence is not None:
__snake_case : List[str] = self.tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
if len(__magic_name__ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
__snake_case : Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase__ ( self : str , *__magic_name__ : Optional[int] , **__magic_name__ : int ) -> int:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*__magic_name__ , **__magic_name__ )
def __call__( self : int , __magic_name__ : List[str] , **__magic_name__ : Optional[int] ) -> Dict:
"""simple docstring"""
return super().__call__(__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : List[str]="" , __magic_name__ : List[str]=None , **__magic_name__ : str ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = self.tokenizer(
prefix + prompt_text , padding=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=self.framework )
__snake_case : Optional[int] = prompt_text
if handle_long_generation == "hole":
__snake_case : List[str] = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
__snake_case : List[Any] = generate_kwargs["""max_new_tokens"""]
else:
__snake_case : Tuple = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__snake_case : str = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""" )
__snake_case : Dict = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
__snake_case : int = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def lowercase__ ( self : List[Any] , __magic_name__ : List[Any] , **__magic_name__ : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : Any = model_inputs["""input_ids"""]
__snake_case : List[Any] = model_inputs.get("""attention_mask""" , __magic_name__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
__snake_case : List[str] = None
__snake_case : Optional[int] = None
__snake_case : List[str] = 1
else:
__snake_case : Optional[int] = input_ids.shape[0]
__snake_case : Union[str, Any] = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__snake_case : Tuple = generate_kwargs.pop("""prefix_length""" , 0 )
if prefix_length > 0:
__snake_case : str = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
__snake_case : List[str] = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__snake_case : Dict = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__snake_case : Any = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ , **__magic_name__ )
__snake_case : Any = generated_sequence.shape[0]
if self.framework == "pt":
__snake_case : List[str] = generated_sequence.reshape(__magic_name__ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__snake_case : List[Any] = tf.reshape(__magic_name__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowercase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Any=ReturnType.FULL_TEXT , __magic_name__ : str=True ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = model_outputs["""generated_sequence"""][0]
__snake_case : Union[str, Any] = model_outputs["""input_ids"""]
__snake_case : str = model_outputs["""prompt_text"""]
__snake_case : List[Any] = generated_sequence.numpy().tolist()
__snake_case : str = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__snake_case : Tuple = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__snake_case : Tuple = self.tokenizer.decode(
__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__snake_case : List[str] = 0
else:
__snake_case : Tuple = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , ) )
if return_type == ReturnType.FULL_TEXT:
__snake_case : List[str] = prompt_text + text[prompt_length:]
else:
__snake_case : Dict = text[prompt_length:]
__snake_case : Union[str, Any] = {"""generated_text""": all_text}
records.append(__magic_name__ )
return records
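# A hedged usage sketch: this class implements the standard "text-generation"
# pipeline task in transformers. The model id is illustrative.
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Hello, I'm a language model,", max_new_tokens=20))
#   # -> [{'generated_text': "Hello, I'm a language model, ..."}]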
| 26 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 1 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
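# A hedged usage sketch, reusing the first example from the docstring above:
#
#   import datasets
#
#   matthews = datasets.load_metric("matthews_correlation")
#   results = matthews.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
#   print(round(results["matthews_correlation"], 2))  # 0.54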
| 26 | 1 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : str = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case : List[Any] = old_name.split(""".""" )
if layer == "0":
__snake_case : int = old_name.replace("""0""" , """convolution1""" )
elif layer == "1":
__snake_case : List[str] = old_name.replace("""1""" , """batchnorm_before""" )
elif layer == "3":
__snake_case : List[str] = old_name.replace("""3""" , """convolution2""" )
else:
__snake_case : str = old_name.replace("""4""" , """batchnorm_after""" )
if "network" in old_name and re.search(R"""\d\.\d""" , _lowerCamelCase ):
__snake_case : str = R"""\b\d{2}\b"""
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case : int = re.search(R"""\d\.\d\d.""" , _lowerCamelCase ).group()
else:
__snake_case : List[Any] = re.search(R"""\d\.\d.""" , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case : Dict = old_name.replace(_lowerCamelCase , """""" )
__snake_case : Any = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
__snake_case : Optional[int] = """intermediate_stages.""" + trimmed_name
else:
__snake_case : Dict = old_name.replace(_lowerCamelCase , """""" )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case : Optional[Any] = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
else:
__snake_case : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case : Optional[int] = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
__snake_case : int = trimmed_name.replace("""norm1""" , """layernorm1""" )
elif "norm2" in old_name:
__snake_case : Optional[int] = trimmed_name.replace("""norm2""" , """layernorm2""" )
elif "fc1" in old_name:
__snake_case : List[Any] = trimmed_name.replace("""fc1""" , """linear_in""" )
elif "fc2" in old_name:
__snake_case : List[Any] = trimmed_name.replace("""fc2""" , """linear_out""" )
__snake_case : Any = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(R""".\d.""" , _lowerCamelCase ):
__snake_case : int = old_name.replace("""network""" , """intermediate_stages""" )
if "fc" in new_name:
__snake_case : Optional[Any] = new_name.replace("""fc""" , """convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case : Tuple = new_name.replace("""norm1""" , """batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case : Dict = new_name.replace("""norm2""" , """batchnorm_after""" )
if "proj" in new_name:
__snake_case : str = new_name.replace("""proj""" , """projection""" )
if "dist_head" in new_name:
__snake_case : str = new_name.replace("""dist_head""" , """distillation_classifier""" )
elif "head" in new_name:
__snake_case : Union[str, Any] = new_name.replace("""head""" , """classifier""" )
elif "patch_embed" in new_name:
__snake_case : int = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case : Tuple = new_name.replace("""norm""" , """layernorm""" )
__snake_case : Dict = """efficientformer.""" + new_name
else:
__snake_case : str = """efficientformer.encoder.""" + new_name
return new_name
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
for key in checkpoint.copy().keys():
__snake_case : int = checkpoint.pop(_lowerCamelCase )
__snake_case : int = val
return checkpoint
def _a ( ) -> List[str]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[str] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : List[Any] = torch.load(_lowerCamelCase , map_location="""cpu""" )["""model"""]
__snake_case : Union[str, Any] = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case : Dict = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case : List[Any] = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
__snake_case : Dict = config.depths[-1] - config.num_metaad_blocks + 1
__snake_case : Any = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case : Optional[int] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
__snake_case : List[str] = prepare_img()
__snake_case : Tuple = 256
__snake_case : Dict = 224
__snake_case : Any = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
__snake_case : Dict = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values
# original processing pipeline
__snake_case : int = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings["""bicubic"""] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case : int = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case : Dict = model(_lowerCamelCase )
__snake_case : Dict = outputs.logits
__snake_case : List[str] = (1, 1000)
if "l1" in model_name:
__snake_case : Dict = torch.Tensor(
[-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case : int = torch.Tensor(
[-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case : int = torch.Tensor(
[-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7.''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
print(F'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add model""" , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message="""Add image processor""" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
__UpperCamelCase = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 26 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
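    # Route HTTP requests to the mock response so the test never touches the network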
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
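    # Normalize the str/list/dict cases into parallel lists so a single loop can verify every URL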
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
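    # Normalize the str/list/dict cases into parallel lists before checking each extracted file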
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
__snake_case : Optional[int] = str(bin(_lowerCamelCase ) )[2:] # remove the leading "0b"
__snake_case : int = str(bin(_lowerCamelCase ) )[2:] # remove the leading "0b"
__snake_case : List[str] = max(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
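    # A result bit is 1 exactly where the zero-padded operands differ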
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_lowerCamelCase ) , b_binary.zfill(_lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase = 100 ) -> int:
"""simple docstring"""
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : List[str] , *__magic_name__ : Optional[Any] , **__magic_name__ : str ) -> None:
"""simple docstring"""
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = row, column
__snake_case : Dict = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
__snake_case : Dict = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__snake_case : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__snake_case : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
__snake_case : str = f'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ : list[float] ) -> str:
nonlocal string_format_identifier
__snake_case : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self )
def lowercase__ ( self : Dict , __magic_name__ : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , __magic_name__ : tuple[int, int] ) -> Any:
"""simple docstring"""
        assert self.validate_indices(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None:
"""simple docstring"""
        assert self.validate_indices(__magic_name__ )
__snake_case : Optional[int] = value
def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
__snake_case : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) -> Matrix:
"""simple docstring"""
__snake_case : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = -self[r, c]
return result
def __sub__( self : Optional[int] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : List[Any] , __magic_name__ : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
__snake_case : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : Tuple = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
__snake_case : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__snake_case : Optional[int] = f'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
__snake_case : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : str = self[r, c]
return result
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
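        # Sherman-Morrison: (A + u v^T)^-1 = A^-1 - (A^-1 u)(v^T A^-1) / (1 + v^T A^-1 u), with self holding A^-1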
__snake_case : List[str] = v.transpose()
__snake_case : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
__snake_case : Any = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__snake_case : Dict = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Union[str, Any] = 1, 2, -3
__snake_case : str = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}''' )
def _a ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 26 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__UpperCamelCase = "cuda" if torch.cuda.is_available() else "cpu"
def _a ( _lowerCamelCase , _lowerCamelCase=100 , _lowerCamelCase=" " ) -> List[str]:
"""simple docstring"""
__snake_case : Any = text.split(_lowerCamelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase )]
def _a ( _lowerCamelCase ) -> dict:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(_lowerCamelCase ):
titles.append(title if title is not None else """""" )
texts.append(_lowerCamelCase )
return {"title": titles, "text": texts}
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> dict:
"""simple docstring"""
__snake_case : Dict = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=_lowerCamelCase , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
__snake_case : int = ctx_encoder(input_ids.to(device=_lowerCamelCase ) , return_dict=_lowerCamelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
__snake_case : Optional[Any] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
__snake_case : str = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
__snake_case : List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_lowerCamelCase )
__snake_case : List[Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
__snake_case : int = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
__snake_case : Dict = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase ) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
__snake_case : Dict = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(_lowerCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
__snake_case : Optional[Any] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=_lowerCamelCase )
# And save the index
__snake_case : List[str] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(_lowerCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class _A :
lowercase__: str = field(
default=str(Path(__lowercase ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
lowercase__: Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
lowercase__: str = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
lowercase__: str = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
lowercase__: Optional[str] = field(
default=str(Path(__lowercase ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class _A :
lowercase__: Optional[int] = field(
default=__lowercase , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
lowercase__: int = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class _A :
lowercase__: int = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
lowercase__: int = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 26 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
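    # Map fairseq MoE parameter names (moe_layer.*, fc1/fc2, encoder_attn) onto the HF NLLB-MoE layout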
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
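    # Each expert lives in its own fairseq checkpoint (suffix -rank-<expert>.pt); convert each one into a shard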
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 1 |
'''simple docstring'''
import operator as op
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : List[Any] = []
__snake_case : Optional[int] = lambda _lowerCamelCase , _lowerCamelCase : int(x / y ) # noqa: E731 integer division operation
__snake_case : List[Any] = {
"""^""": op.pow,
"""*""": op.mul,
"""/""": div,
"""+""": op.add,
"""-""": op.sub,
} # operators & their respective operation
# print table header
print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
print("""-""" * (30 + len(_lowerCamelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_lowerCamelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(_lowerCamelCase ) , sep=""" | """ )
else:
__snake_case : List[str] = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(_lowerCamelCase ) , sep=""" | """ )
__snake_case : Dict = stack.pop() # pop stack
# output in tabular format
print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(_lowerCamelCase ) , sep=""" | """ )
stack.append(
str(opr[x](int(_lowerCamelCase ) , int(_lowerCamelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(_lowerCamelCase ) , sep=""" | """ , )
return int(stack[0] )
if __name__ == "__main__":
__UpperCamelCase = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 26 |
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
__snake_case : List[Any] = 0.04
__snake_case : Tuple = self.window_size // 2
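        # For every pixel, accumulate the gradient products over the window (structure tensor) and score r = det - k * trace**2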
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
                # Threshold on the Harris response; raise or lower 0.5 to tune corner sensitivity
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 | 1 |
'''simple docstring'''
import baseaa
def _a ( _lowerCamelCase ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode("""utf-8""" ) )
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
return baseaa.baadecode(_lowerCamelCase ).decode("""utf-8""" )
if __name__ == "__main__":
__UpperCamelCase = "Hello World!"
__UpperCamelCase = baseaa_encode(test)
print(encoded)
__UpperCamelCase = baseaa_decode(encoded)
print(decoded)
| 26 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
__snake_case : str = b * b - 4 * a * c
__snake_case : Optional[Any] = (-b + sqrt(_lowerCamelCase )) / (2 * a)
__snake_case : Dict = (-b - sqrt(_lowerCamelCase )) / (2 * a)
    return (
        root_a.real if not root_a.imag else root_a,
        root_b.real if not root_b.imag else root_b,
    )
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case , __snake_case : Dict = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'''The solutions are: {solutiona} and {solutionb}''' )
if __name__ == "__main__":
main()
| 26 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _A ( __lowercase ):
lowercase__: Union[List[PIL.Image.Image], np.ndarray]
lowercase__: Optional[List[bool]]
lowercase__: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 26 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _A ( __lowercase , unittest.TestCase ):
lowercase__: Dict = ShapEPipeline
lowercase__: Tuple = ['''prompt''']
lowercase__: str = ['''prompt''']
lowercase__: str = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
lowercase__: Dict = False
@property
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return 8
@property
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__magic_name__ )
@property
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__snake_case : Union[str, Any] = PriorTransformer(**__magic_name__ )
return model
@property
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : Dict = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__snake_case : Dict = ShapERenderer(**__magic_name__ )
return model
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : List[str] = self.dummy_prior
__snake_case : Any = self.dummy_text_encoder
__snake_case : Tuple = self.dummy_tokenizer
__snake_case : int = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=__magic_name__ , clip_sample=__magic_name__ , clip_sample_range=1.0 , )
__snake_case : int = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Any=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(__magic_name__ ).startswith("""mps""" ):
__snake_case : List[Any] = torch.manual_seed(__magic_name__ )
else:
__snake_case : Optional[int] = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
__snake_case : str = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = """cpu"""
__snake_case : Dict = self.get_dummy_components()
__snake_case : Dict = self.pipeline_class(**__magic_name__ )
__snake_case : List[Any] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__magic_name__ ) )
__snake_case : str = output.images[0]
__snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : int = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = torch_device == """cpu"""
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__magic_name__ , relax_max_difference=__magic_name__ , )
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : int = self.get_dummy_components()
__snake_case : List[str] = self.pipeline_class(**__magic_name__ )
__snake_case : List[Any] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : str = 1
__snake_case : Optional[int] = 2
__snake_case : int = self.get_dummy_inputs(__magic_name__ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : List[str] = batch_size * [inputs[key]]
__snake_case : Optional[Any] = pipe(**__magic_name__ , num_images_per_prompt=__magic_name__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
__snake_case : List[Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
__snake_case : Optional[int] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : Optional[int] = torch.Generator(device=__magic_name__ ).manual_seed(0 )
__snake_case : Dict = pipe(
"""a shark""" , generator=__magic_name__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
| 26 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
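# Worked example (illustrative, not executed): the ParlAI key
#   "encoder.layers.0.attention.q_lin.weight"
# first becomes "encoder.layers.0.attn.q_proj.weight" via PATTERNS
# ("attention" -> "attn", "q_lin" -> "q_proj"); the encoder-specific rewrite
# ".attn" -> ".self_attn" then yields
#   "encoder.layers.0.self_attn.q_proj.weight".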
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI checkpoint into the Hugging Face Blenderbot layout."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
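# Minimal usage sketch (illustrative; the defaults above mirror the
# MIT/ast-finetuned-audioset checkpoint):
#
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   config.hidden_size       # -> 768
#   config.frequency_stride  # -> 10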
| 26 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
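# Worked examples of the bump logic above (illustrative version numbers):
#   pre_release_work()           : 4.27.0.dev0 -> 4.27.0
#   pre_release_work(patch=True) : 4.26.1      -> 4.26.2
#   post_release_work()          : 4.27.0      -> 4.28.0.dev0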
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
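# Worked example (illustrative): with the default config, the first entry
# produced above maps
#   "module.blocks.0.norm1.weight" -> "vit.encoder.layer.0.layernorm_before.weight"
# and, when base_model=True, the "vit." prefix is stripped again by the
# comprehension in the base_model branch.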
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
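# Shape note (illustrative): timm stores Q, K and V as one fused projection of
# shape (3 * hidden_size, hidden_size); the three slices above carve it back
# into separate query/key/value weights of shape (hidden_size, hidden_size).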
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is only used during the self-supervised MSN pre-training,
    # so it is not needed for downstream tasks
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 26 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
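# Quick illustration of the column-inference behavior exercised above
# (assumes the `datasets` library is installed):
#
#   from datasets import Dataset
#   dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#   dset[0]  # {"col_1": 1}    -- the first record fixes the schema
#   dset[1]  # {"col_1": None} -- "col_2" is silently dropped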
| 26 | 1 |
'''simple docstring'''
def normality(moles: float, volume: float, nfactor: float) -> float:
    # NOTE: the descriptive function names in this file are reconstructions;
    # the original definitions carried placeholder names.
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> float:
    # ideal gas law with R = 0.0821 L*atm/(mol*K): P = nRT / V
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> float:
    # ideal gas law rearranged: V = nRT / P
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(moles: float, volume: float, pressure: float) -> float:
    # ideal gas law rearranged: T = PV / (nR)
    return round(float((pressure * volume) / (0.0821 * moles)))
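# Worked example (illustrative): 2 mol in a 10 L vessel at 300 K exerts
# P = nRT / V = (2 * 0.0821 * 300) / 10 = 4.926 atm, which the rounding
# above reports as 5:
#   pressure_of_gas_system(2, 300, 10)  # -> 5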
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNormad(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
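# Minimal sketch of the hook mechanism exercised above (assumes `accelerate`
# is installed; `MyModel` is a stand-in for any nn.Module):
#
#   from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module
#   model = add_hook_to_module(MyModel(), ModelHook())   # wraps model.forward
#   model = remove_hook_from_module(model)               # restores the original forward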
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n_digits: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
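# Worked result (Project Euler 33): the four non-trivial curious fractions are
# 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so solution() -> 100.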
if __name__ == "__main__":
print(solution())
| 26 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
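# Result shape (illustrative): `path` lists the [x, y] cells from init to goal
# in order, while `action[i][j]` holds the index into DIRECTIONS of the move
# that first reached cell (i, j) -- which is how the reverse walk above
# recovers the path.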
if __name__ == "__main__":
    grid = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 | 1 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
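# Tiny worked example (illustrative; uses the adjacency-list shape expected by
# prisms_algorithm):
#   adjacency = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[1, 2], [0, 3]]}
# The cheapest edges 0-1 (weight 1) and 1-2 (weight 2) form the minimum
# spanning tree, so prisms_algorithm(adjacency) returns [(0, 1), (1, 2)].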
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 26 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Returns the biggest number that can be formed by deleting one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
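# Worked example (illustrative): remove_digit(152) considers 52, 12 and 15
# (deleting one digit at a time) and returns 52; the sign is dropped first,
# so remove_digit(-290) -> 90.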
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
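# Hedged usage sketch (assumes `transformers` and `numpy` are installed):
#
#   import numpy as np
#   fe = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
#   audio = np.zeros(24000, dtype=np.float32)            # 1 s of silence
#   batch = fe(audio, sampling_rate=24000, return_tensors="np")
#   batch["input_values"].shape                          # -> (1, 1, 24000)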
| 26 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
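# Worked example: for the demo scores below, the leaf pairs reduce to
# max(90, 23) = 90, max(6, 33) = 33, max(21, 65) = 65 and
# max(123, 34423) = 34423; the minimiser keeps 33 and 65, and the root
# maximiser picks 65 -- which is what main() prints.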
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26 | 1 |
'''simple docstring'''
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
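# Worked example: gcd(3, 11) == 1, and find_mod_inverse(3, 11) -> 4,
# since (3 * 4) % 11 == 1.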
| 26 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
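# Usage sketch (illustrative): slowsort mutates the list in place.
#   seq = [3, 1, 2]
#   slowsort(seq)
#   seq  # -> [1, 2, 3]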
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
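# Hedged invocation sketch (the script file name and all paths below are
# placeholders, not taken from the original):
#
#     python convert_blenderbot_original_checkpoint_to_pytorch.py \
#         --src_path ./blenderbot-model.bin \
#         --save_dir ./hf_blenderbot \
#         --hf_config_json ./blenderbot-3b-config.json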
| 26 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
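# Minimal sketch of the doctest machinery the test class above drives (the
# module chosen here is illustrative only):
#
#     import doctest
#     import unittest
#
#     import transformers.models.bert.modeling_bert as module
#
#     suite = doctest.DocTestSuite(module)
#     result = unittest.TextTestRunner().run(suite)
#     assert len(result.failures) == 0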
| 26 | 1 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
__UpperCamelCase = True
from torch.cuda.amp import autocast
__UpperCamelCase = logging.getLogger(__name__)
@dataclass
class _A :
lowercase__: str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
lowercase__: Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
lowercase__: Optional[bool] = field(
default=__lowercase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
lowercase__: Optional[bool] = field(
default=__lowercase , metadata={'''help''': '''Whether to log verbose messages or not.'''} , )
lowercase__: Optional[float] = field(
default=2.0 , metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} )
lowercase__: Optional[float] = field(
default=0.5 , metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} )
lowercase__: Optional[float] = field(
default=0.9_9_9_9_9_5 , metadata={'''help''': '''Decay of gumbel temperature during training.'''} )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__snake_case : int = logging.WARNING
if model_args.verbose_logging:
__snake_case : Any = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
__snake_case : List[str] = logging.INFO
logger.setLevel(_lowerCamelCase )
@dataclass
class _A :
lowercase__: str = field(
default=__lowercase , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
lowercase__: Optional[str] = field(
default=__lowercase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
lowercase__: Optional[str] = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
lowercase__: Optional[str] = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
lowercase__: Optional[str] = field(
default='''file''' , metadata={'''help''': '''Column in the dataset that contains speech file path. Defaults to \'file\''''} , )
lowercase__: bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
lowercase__: Optional[int] = field(
default=1 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
lowercase__: Optional[int] = field(
default=__lowercase , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
lowercase__: Optional[float] = field(
default=2_0.0 , metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} )
@dataclass
class _A :
lowercase__: WavaVecaForPreTraining
lowercase__: WavaVecaFeatureExtractor
lowercase__: Union[bool, str] = "longest"
lowercase__: Optional[int] = None
lowercase__: Optional[int] = None
def __call__( self : Any , __magic_name__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
__snake_case : Any = self.feature_extractor.pad(
__magic_name__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
__snake_case : int = self.model._get_feat_extract_output_lengths(batch["""input_values"""].shape[-1] )
__snake_case : Optional[Any] = batch["""input_values"""].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
__snake_case : Optional[int] = self.model._get_feat_extract_output_lengths(batch["""attention_mask"""].sum(-1 ) ).to(
torch.long )
__snake_case : Any = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["""input_values"""].device )
# these two operations makes sure that all values
# before the output lengths indices are attended to
__snake_case : Dict = 1
__snake_case : Optional[Any] = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
__snake_case : int = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__magic_name__ , min_masks=2 , )
return batch
class _A ( __lowercase ):
def __init__( self : str , *__magic_name__ : Any , __magic_name__ : Optional[Any]=1 , __magic_name__ : int=0 , __magic_name__ : Tuple=1.0 , **__magic_name__ : Any ) -> Dict:
"""simple docstring"""
super().__init__(*__magic_name__ , **__magic_name__ )
__snake_case : List[str] = 0
__snake_case : Optional[Any] = max_gumbel_temp
__snake_case : int = min_gumbel_temp
__snake_case : Union[str, Any] = gumbel_temp_decay
def lowercase__ ( self : Optional[Any] , __magic_name__ : nn.Module , __magic_name__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
"""simple docstring"""
model.train()
__snake_case : int = self._prepare_inputs(__magic_name__ )
if self.use_amp:
with autocast():
__snake_case : str = self.compute_loss(__magic_name__ , __magic_name__ )
else:
__snake_case : Tuple = self.compute_loss(__magic_name__ , __magic_name__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
__snake_case : Union[str, Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__snake_case : str = loss.sum() / (inputs["""mask_time_indices"""]).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
__snake_case : List[Any] = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(__magic_name__ ).backward()
elif self.use_apex:
with amp.scale_loss(__magic_name__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(__magic_name__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
def _a ( ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
__snake_case , __snake_case , __snake_case : List[Any] = parser.parse_args_into_dataclasses()
configure_logger(_lowerCamelCase , _lowerCamelCase )
# Downloading and loading a dataset from the hub.
__snake_case : Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
__snake_case : int = DatasetDict()
__snake_case : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
__snake_case : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
__snake_case : Optional[Any] = DatasetDict()
__snake_case : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="""validation""" , cache_dir=model_args.cache_dir , )
__snake_case : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
__snake_case : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCamelCase )
def prepare_dataset(_lowerCamelCase ):
# check that all files have the correct sampling rate
__snake_case , __snake_case : int = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
__snake_case : List[str] = datasets.map(
_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["""train"""].column_names )
# filter audio files that are too long
__snake_case : Optional[Any] = vectorized_datasets.filter(
lambda _lowerCamelCase : len(data["""speech"""] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(_lowerCamelCase ):
return feature_extractor(batch["""speech"""] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
__snake_case : str = vectorized_datasets.map(
_lowerCamelCase , batched=_lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["""train"""].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
__snake_case : str = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"""PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"""
""" ``config.feat_extract_norm='layer'""" )
__snake_case : Tuple = WavaVecaForPreTraining(_lowerCamelCase )
__snake_case : int = DataCollatorForWavaVecaPretraining(model=_lowerCamelCase , feature_extractor=_lowerCamelCase )
__snake_case : List[str] = WavaVecaPreTrainer(
model=_lowerCamelCase , data_collator=_lowerCamelCase , args=_lowerCamelCase , train_dataset=vectorized_datasets["""train"""] , eval_dataset=vectorized_datasets["""validation"""] , tokenizer=_lowerCamelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
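# Hedged launch sketch (script name, dataset and output paths are
# placeholders; the checkpoint must use do_stable_layer_norm=True, as the
# config check above enforces):
#
#     python run_wav2vec2_pretraining.py \
#         --model_name_or_path facebook/wav2vec2-large-lv60 \
#         --dataset_name librispeech_asr \
#         --dataset_config_name clean \
#         --output_dir ./wav2vec2-pretrained \
#         --do_train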
| 26 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
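# Hedged usage sketch (checkpoint ids, the community-pipeline name and the
# `sampling_rate` keyword are assumptions; the original argument names are
# obscured above):
#
#     import librosa
#     from diffusers import DiffusionPipeline
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#     audio, sr = librosa.load("speech.wav", sr=16_000)
#     speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#     speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=speech_model,
#         speech_processor=speech_processor,
#     )
#     image = pipe(audio, sampling_rate=sr).images[0]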
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class _A :
def __init__( self : Optional[Any] , __magic_name__ : list[str] ) -> Tuple:
"""simple docstring"""
__snake_case : list[dict] = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(__magic_name__ )
self.set_fail_transitions()
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : str ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowercase__ ( self : str , __magic_name__ : str ) -> None:
"""simple docstring"""
__snake_case : Optional[Any] = 0
for character in keyword:
__snake_case : int = self.find_next_state(__magic_name__ , __magic_name__ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
__snake_case : Union[str, Any] = len(self.adlist ) - 1
else:
__snake_case : List[str] = next_state
self.adlist[current_state]["output"].append(__magic_name__ )
def lowercase__ ( self : Dict ) -> None:
"""simple docstring"""
__snake_case : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(__magic_name__ )
__snake_case : Dict = 0
while q:
__snake_case : Optional[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(__magic_name__ )
__snake_case : Dict = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(__magic_name__ , self.adlist[child]["""value"""] ) is None
and state != 0
):
__snake_case : Optional[int] = self.adlist[state]["""fail_state"""]
__snake_case : Union[str, Any] = self.find_next_state(
__magic_name__ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
__snake_case : List[Any] = 0
__snake_case : Any = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def lowercase__ ( self : List[str] , __magic_name__ : str ) -> dict[str, list[int]]:
"""simple docstring"""
__snake_case : dict = {} # returns a dict with keywords and list of its occurrences
__snake_case : Optional[int] = 0
for i in range(len(__magic_name__ ) ):
while (
self.find_next_state(__magic_name__ , string[i] ) is None
and current_state != 0
):
__snake_case : List[Any] = self.adlist[current_state]["""fail_state"""]
__snake_case : List[Any] = self.find_next_state(__magic_name__ , string[i] )
if next_state is None:
__snake_case : int = 0
else:
__snake_case : Dict = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__snake_case : Union[str, Any] = []
result[key].append(i - len(__magic_name__ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
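# Hedged usage sketch: the class above is the classic Aho-Corasick automaton
# (its original class and method names are obscured here). With the canonical
# interface it would be driven like this:
#
#     automaton = Automaton(["what", "hat", "ver", "er"])
#     automaton.search_in("whatever, err ... , wherever")
#     # expected: {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}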
| 26 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__UpperCamelCase = HUGGINGFACE_HUB_CACHE
__UpperCamelCase = "config.json"
__UpperCamelCase = "diffusion_pytorch_model.bin"
__UpperCamelCase = "diffusion_flax_model.msgpack"
__UpperCamelCase = "model.onnx"
__UpperCamelCase = "diffusion_pytorch_model.safetensors"
__UpperCamelCase = "weights.pb"
__UpperCamelCase = "https://huggingface.co"
__UpperCamelCase = default_cache_path
__UpperCamelCase = "diffusers_modules"
__UpperCamelCase = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
__UpperCamelCase = ["fp16", "non-ema"]
__UpperCamelCase = ".self_attn"
| 26 | 1 |
'''simple docstring'''
__UpperCamelCase = "Alexander Joslin"
import operator as op
from .stack import Stack
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
__snake_case : Dict = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
__snake_case : Stack[int] = Stack()
__snake_case : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowerCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowerCamelCase )
elif i == ")":
# RULE 4
__snake_case : Dict = operator_stack.peek()
operator_stack.pop()
__snake_case : Dict = operand_stack.peek()
operand_stack.pop()
__snake_case : Tuple = operand_stack.peek()
operand_stack.pop()
__snake_case : Optional[Any] = operators[opr](_lowerCamelCase , _lowerCamelCase )
operand_stack.push(_lowerCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__UpperCamelCase = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 26 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
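# Hedged invocation sketch (script name and checkpoint path are placeholders):
#
#     python convert_mobilenet_v1_checkpoint_to_pytorch.py \
#         --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#         --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf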
| 26 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__UpperCamelCase = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
__UpperCamelCase = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class _A ( __lowercase ):
lowercase__: List[str] = VOCAB_FILES_NAMES
lowercase__: str = PRETRAINED_VOCAB_FILES_MAP
lowercase__: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__: List[str] = ['''input_ids''', '''attention_mask''']
lowercase__: Optional[int] = RobertaTokenizer
def __init__( self : List[str] , __magic_name__ : Any=None , __magic_name__ : Dict=None , __magic_name__ : List[str]=None , __magic_name__ : Any="replace" , __magic_name__ : int="<s>" , __magic_name__ : Union[str, Any]="</s>" , __magic_name__ : Any="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Optional[int]="<unk>" , __magic_name__ : Any="<pad>" , __magic_name__ : str="<mask>" , __magic_name__ : int=False , __magic_name__ : Optional[Any]=True , **__magic_name__ : str , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , )
__snake_case : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
__snake_case : Union[str, Any] = getattr(__magic_name__ , pre_tok_state.pop("""type""" ) )
__snake_case : Optional[Any] = add_prefix_space
__snake_case : List[str] = pre_tok_class(**__magic_name__ )
__snake_case : int = add_prefix_space
__snake_case : Dict = """post_processor"""
__snake_case : List[Any] = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
if tokenizer_component_instance:
__snake_case : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : Optional[Any] = tuple(state["""sep"""] )
if "cls" in state:
__snake_case : str = tuple(state["""cls"""] )
__snake_case : Optional[int] = False
if state.get("""add_prefix_space""" , __magic_name__ ) != add_prefix_space:
__snake_case : Union[str, Any] = add_prefix_space
__snake_case : List[str] = True
if state.get("""trim_offsets""" , __magic_name__ ) != trim_offsets:
__snake_case : List[str] = trim_offsets
__snake_case : Any = True
if changes_to_apply:
__snake_case : Tuple = getattr(__magic_name__ , state.pop("""type""" ) )
__snake_case : str = component_class(**__magic_name__ )
setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ )
@property
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self : Optional[Any] , __magic_name__ : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value
__snake_case : List[Any] = value
def lowercase__ ( self : Union[str, Any] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> BatchEncoding:
"""simple docstring"""
__snake_case : Any = kwargs.get("""is_split_into_words""" , __magic_name__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[Any] , *__magic_name__ : Any , **__magic_name__ : Optional[Any] ) -> BatchEncoding:
"""simple docstring"""
__snake_case : List[str] = kwargs.get("""is_split_into_words""" , __magic_name__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
__snake_case : int = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Dict=None ) -> Dict:
"""simple docstring"""
__snake_case : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__snake_case : List[str] = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
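# Hedged usage sketch of the fast tokenizer defined above:
#
#     from transformers import RobertaTokenizerFast
#
#     tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#     enc = tokenizer("Hello world")
#     # enc["input_ids"] starts with <s> (id 0) and ends with </s> (id 2)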
| 26 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 1 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = 0
if start < end:
__snake_case : int = randint(_lowerCamelCase , _lowerCamelCase )
__snake_case : int = a[end]
__snake_case : str = a[pivot]
__snake_case : Tuple = temp
__snake_case , __snake_case : Optional[int] = _in_place_partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
count += _in_place_quick_sort(_lowerCamelCase , _lowerCamelCase , p - 1 )
count += _in_place_quick_sort(_lowerCamelCase , p + 1 , _lowerCamelCase )
return count
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = 0
__snake_case : Any = randint(_lowerCamelCase , _lowerCamelCase )
__snake_case : Optional[int] = a[end]
__snake_case : Union[str, Any] = a[pivot]
__snake_case : Any = temp
__snake_case : Any = start - 1
for index in range(_lowerCamelCase , _lowerCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__snake_case : List[Any] = new_pivot_index + 1
__snake_case : Optional[Any] = a[new_pivot_index]
__snake_case : Dict = a[index]
__snake_case : Optional[Any] = temp
__snake_case : Any = a[new_pivot_index + 1]
__snake_case : Optional[int] = a[end]
__snake_case : Union[str, Any] = temp
return new_pivot_index + 1, count
__UpperCamelCase = TemporaryFile()
__UpperCamelCase = 100 # 100 elements are to be sorted
__UpperCamelCase , __UpperCamelCase = 0, 1 # mean and standard deviation
__UpperCamelCase = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
__UpperCamelCase = np.load(outfile)
__UpperCamelCase = len(M) - 1
__UpperCamelCase = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
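# Sanity check on the printed count: randomized quicksort performs about
# 2 * n * ln(n) comparisons in expectation, i.e. roughly 2 * 100 * ln(100) ≈ 921
# for the 100 elements sorted here.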
| 26 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
| 26 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
__UpperCamelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
__UpperCamelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        """Declare the metric's expected features and reference URLs."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types(self):
        """Sequence features for the "multilist" config, scalar floats otherwise."""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Compute MSE (or RMSE when squared=False) with scikit-learn."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 26 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """Shared helper: check a .jsonl member yielded by iter_archive."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    # The fixture name is reconstructed; the original parameter name was lost.
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
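

# Editor's note (not in the original test file): the HASH constant above is the
# sha256-based cache filename that `hash_url_to_filename` derives from URL with no
# etag, which is what the `parts[-1] == HASH` assertion relies on. A minimal sketch
# of that relationship, assuming the helper keeps its (url, etag=None) signature:
def test_hash_url_to_filename_matches_constant():
    assert hash_url_to_filename(URL, etag=None) == HASH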
| 26 | 1 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        """Declare the metric's expected features and reference URLs."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
    def _compute(self, predictions, references, sample_weight=None):
        """Score predictions against references with scikit-learn's matthews_corrcoef."""
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
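

# Editor's sketch (not in the original file): for binary labels, the MCC computed by
# sklearn above agrees with the closed form
#     MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)),
# which this quick check verifies on a small assumed example.
if __name__ == "__main__":
    from math import sqrt

    refs = [1, 1, 1, 0, 0, 0, 0, 1]
    preds = [1, 0, 1, 0, 0, 1, 0, 1]
    tp = sum(r == p == 1 for r, p in zip(refs, preds))
    tn = sum(r == p == 0 for r, p in zip(refs, preds))
    fp = sum(r == 0 and p == 1 for r, p in zip(refs, preds))
    fn = sum(r == 1 and p == 0 for r, p in zip(refs, preds))
    mcc = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    assert abs(mcc - matthews_corrcoef(refs, preds)) < 1e-12  # both give 0.5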
| 26 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
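

# Editor's cross-check (not part of the original file): brute-force the same quantity
# to confirm the closed-form expressions above for a small n.
def _brute_force(n: int = 100) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares


assert solution(10) == _brute_force(10) == 2640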
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a row x column matrix filled with default_value."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier wide enough to align every element
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        """Check that loc is a valid (row, column) index pair."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add element-wise
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Given self == A^(-1), return (A + uv^T)^(-1) via the Sherman-Morrison formula."""
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1): the inverse of the 3x3 identity is the identity itself
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
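
    # Editor's extra check (not in the original file): verify numerically that
    # sherman_morrison really returns (A + uv^T)^(-1) by multiplying it with
    # (A + uv^T) and comparing against the identity, using the vectors above.
    def test_sherman_morrison() -> None:
        a = Matrix(3, 3, 0)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            a[i, i] = 1  # A is the identity, so A^(-1) == A
            ainv[i, i] = 1
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        m = a + u * v.transpose()
        m_inv = ainv.sherman_morrison(u, v)
        product = m * m_inv
        for r in range(3):
            for c in range(3):
                assert abs(product[r, c] - (1 if r == c else 0)) < 1e-9

    test_sherman_morrison()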
| 26 | 1 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected output size: shortest-edge resize, capped at (1333/800) * size,
        then both sides floored to a multiple of size_divisor."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
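

# Editor's sketch (not part of the original test file): the expected-size logic in
# get_expected_values above is shortest-edge resizing with a 1333/800 cap followed by
# rounding both sides down to a multiple of size_divisor. The same math as a free function:
def shortest_edge_resize(h: int, w: int, size: int = 288, size_divisor: int = 32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor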
| 26 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear whose weights are tied to an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Map fairseq MoE parameter names onto the NllbMoe naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # The original script tested `if "fc2" and ...`, which is always truthy;
        # the intended membership test is spelled out here.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
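

# Editor's illustration (not part of the original script), showing the key remapping
# above on a toy state dict of the right shape:
#
#   >>> import torch
#   >>> toy = {"layers.0.moe_layer.experts.0.fc1.weight": torch.zeros(1)}
#   >>> list(rename_fairseq_keys(toy, expert_idx=3))
#   ['layers.0.ffn.experts.expert_3.fc1.weight']
#
# Cross-attention keys are rewritten the same way, e.g.
# "decoder.layers.0.encoder_attn.k_proj.weight" becomes
# "decoder.layers.0.cross_attention.k_proj.weight".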
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert per-expert fairseq checkpoints into HF-style shards plus an index."""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list restricted to 0/1 edge weights (for 0-1 BFS)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS with a deque: weight-0 edges go to the front, weight-1 to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
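

# Editor's usage sketch (not part of the original file): a 4-vertex graph where the
# 0/1 weights make the deque ordering matter; 0-weight edges are explored first.
def _demo() -> None:
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    g.add_edge(2, 3, 0)
    assert g.get_shortest_path(0, 3) == 1  # e.g. 0 -> 2 (weight 1) -> 3 (weight 0)


if __name__ == "__main__":
    _demo()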
| 26 |
'''simple docstring'''
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k: Harris free-response parameter (0.04 or 0.06); window_size: summation window side."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return (annotated color image, [x, y, response] corner list)."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Harris response; the original shadowed self.k with a hard-coded 0.04
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 26 | 1 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means on 'vectors' using the TensorFlow 1.x graph API (tf.Session,
    placeholders, tf.sub); this will not run unmodified on TensorFlow 2.x."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
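

# Editor's sketch (not part of the original file): the same expectation-maximization
# loop in plain NumPy, which is how one would write this today given that the
# tf.Session / placeholder API above is TensorFlow 1.x only.
def numpy_kmeans(vectors, noofclusters, noofiterations=100, seed=0):
    import numpy as np

    rng = np.random.default_rng(seed)
    data = np.asarray(vectors, dtype=float)
    centroids = data[rng.choice(len(data), size=noofclusters, replace=False)]
    for _ in range(noofiterations):
        # Expectation: assign each vector to its nearest centroid
        assignments = np.argmin(
            np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2), axis=1
        )
        # Maximization: move each centroid to the mean of its assigned vectors
        for k in range(noofclusters):
            if np.any(assignments == k):
                centroids[k] = data[assignments == k].mean(axis=0)
    return centroids, assignments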
| 26 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
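

# Editor's usage sketch (not part of the original file): the processor bundles the
# tokenizer and image processor, so one call produces both text and pixel inputs.
# Network access and the checkpoint name are assumptions of this example.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import CLIPProcessor as HFCLIPProcessor

    processor = HFCLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values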
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # Note: `support_list` here is the module-level global set by the UI code below.
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
    sampled = "beam"
    n_beams = 2
    min_len = 64
    max_len = 256
    top_p = None
    temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
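For reference, a minimal sketch of what an `answer_question`-style helper typically does with the generation settings collected above. The helper itself, its exact signature, and the model/tokenizer objects are defined earlier in the app; the function name and call shape here are assumptions, not the app's actual implementation.

def sketch_answer_question(question_doc, model, tokenizer, min_len, max_len, sampling, n_beams, top_p, temp):
    # Encode the question plus retrieved passages, then decode a single answer.
    inputs = tokenizer(question_doc, return_tensors="pt", truncation=True)
    generated = model.generate(
        **inputs,
        min_length=min_len,
        max_length=max_len,
        do_sample=sampling,                        # "sampled" mode
        num_beams=n_beams if not sampling else 1,  # "beam" mode
        top_p=top_p,
        temperature=temp,
    )
    return tokenizer.decode(generated[0], skip_special_tokens=True)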
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid, init, goal, cost, heuristic) -> tuple[list[list[int]], list[list[int]]]:
    """simple docstring"""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 26 |
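The open list in `search` above is kept ordered by re-sorting on every iteration. A common alternative, sketched here under the same [f, g, x, y] node layout, is a binary heap, which makes each pop O(log n) and removes the sort/reverse/pop triple:

import heapq

open_heap = []                             # replaces `cell`
heapq.heappush(open_heap, [0, 0, 0, 0])    # push a [f, g, x, y] node
f, g, x, y = heapq.heappop(open_heap)      # pop the node with the smallest f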
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 26 | 1 |
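The file above is an instance of a common deprecation-alias pattern: the old class name keeps working, warns once, and defers everything to its replacement. Reduced to its core (the names here are illustrative, not from any library):

import warnings

class NewProcessor:
    pass

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # Warn, then behave exactly like the replacement class.
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)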
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None) -> None:
        """simple docstring"""
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        """simple docstring"""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """simple docstring"""
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        """simple docstring"""
        return cls(**config)
    def get_config(self):
        """simple docstring"""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        """simple docstring"""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 26 |
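Hypothetical usage of the in-graph tokenizer above. The checkpoint name is an assumption; note that without a pad_token_id/max_length the layer returns its outputs unpadded (as ragged tensors):

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
batch = tf_tokenizer(tf.constant(["hello world", "goodbye"]))
# batch["input_ids"] and batch["attention_mask"] are TensorFlow tensors, so the
# whole tokenize-then-model pipeline can be exported as a single tf.function/SavedModel.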
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    """simple docstring"""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """simple docstring"""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
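A quick sanity check of the key renaming above, worked by hand: the PATTERNS substitutions run first ("attention" -> "attn", "q_lin" -> "q_proj"), then the encoder-specific fixup rewrites ".attn" to ".self_attn":

assert (
    rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
    == "encoder.layers.0.self_attn.q_proj.weight"
)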
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 26 |
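Instantiating the config above with quantization turned on, as a small illustration of the defaults:

config = IBertConfig(quant_mode=True)
print(config.hidden_size, config.num_hidden_layers, config.quant_mode)  # 768 12 True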
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
    "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """simple docstring"""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """simple docstring"""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(F'Updating version to {version}.')
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(F'Updating version to {version}.')
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 26 | 1 |
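How one of the REPLACE_PATTERNS entries behaves, traced on a one-line stand-in for a file body (the input string here is illustrative, not a real repo file):

re_pattern, template = REPLACE_PATTERNS["init"]
code = '__version__ = "4.27.0.dev0"\n'
print(re_pattern.sub(template.replace("VERSION", "4.27.0"), code), end="")
# __version__ = "4.27.0"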
'''simple docstring'''
def remove_digit(num: int) -> int:
    """simple docstring"""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
    __import__("doctest").testmod()
| 26 |
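Two worked examples of `remove_digit`: every single-digit deletion is generated, then the maximum survives.

print(remove_digit(152))   # 52  -> candidates are 52, 12, 15
print(remove_digit(6385))  # 685 -> candidates are 385, 685, 635, 638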
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        """simple docstring"""
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict(self):
        """simple docstring"""
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)
    def test_create(self):
        """simple docstring"""
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])
    def test_list_dict_equivalent(self):
        """simple docstring"""
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)
    def test_uneven_records(self):  # checks what happens with missing columns
        """simple docstring"""
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns
    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        """simple docstring"""
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))
    def test_create_empty(self):
        """simple docstring"""
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 26 | 1 |
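The behavior the tests above pin down, run directly (requires a datasets version that ships Dataset.from_list):

from datasets import Dataset

ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
print(ds.column_names)  # ['col_1', 'col_2']
print(ds[0])            # {'col_1': 3, 'col_2': 'a'}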
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 26 |
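A typical round trip through the processor above; the checkpoint name is an assumption and the blank image is just a stand-in:

from PIL import Image

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
enc = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(enc.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']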
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        """simple docstring"""
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        """simple docstring"""
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        """simple docstring"""
        return output + 1
class HooksModelTester(unittest.TestCase):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 | 1 |
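The hook contract these tests exercise, in miniature: `pre_forward` may rewrite the (args, kwargs) a module receives and `post_forward` may rewrite its output. A minimal sketch using the public accelerate API:

import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module

class ScaleOutputHook(ModelHook):
    def post_forward(self, module, output):
        return output * 2  # double whatever the module returns

layer = nn.Linear(3, 3)
add_hook_to_module(layer, ScaleOutputHook())
out = layer(torch.randn(2, 3))  # forward runs, then the hook doubles the result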
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """simple docstring"""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """simple docstring"""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """simple docstring"""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """simple docstring"""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 26 |
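`is_safe` in isolation: digit 5 already sits in row 0 of `initial_grid` (and in the top-left 3x3 box), so it cannot go in the empty cell (0, 1), while 1 appears in neither the row, the column, nor the box:

print(is_safe(initial_grid, 0, 1, 5))  # False
print(is_safe(initial_grid, 0, 1, 1))  # True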
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
def lowercase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase__ ( self : int , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : str ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = ViTModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Dict = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = ViTForMaskedImageModeling(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[str] = model(__magic_name__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__snake_case : int = 1
__snake_case : List[Any] = ViTForMaskedImageModeling(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : int , __magic_name__ : int , __magic_name__ : Any , __magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = self.type_sequence_label_size
__snake_case : Optional[int] = ViTForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[str] = model(__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : int = 1
__snake_case : List[str] = ViTForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowercase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : int = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = model_class(__magic_name__ )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowercase__ ( self : int ) -> int:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = ViTModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def prepare_img():
"""simple docstring"""
__snake_case : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__snake_case : Any = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(__magic_name__ )
__snake_case : List[str] = self.default_image_processor
__snake_case : Any = prepare_img()
__snake_case : Optional[Any] = image_processor(images=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**__magic_name__ )
# verify the logits
__snake_case : List[str] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : int = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
@slow
def lowercase__ ( self : str ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(__magic_name__ )
__snake_case : List[str] = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 )
__snake_case : Dict = prepare_img()
__snake_case : Tuple = image_processor(images=__magic_name__ , return_tensors="""pt""" )
__snake_case : Optional[int] = inputs.pixel_values.to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : List[str] = model(__magic_name__ , interpolate_pos_encoding=__magic_name__ )
# verify the logits
__snake_case : Any = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , __magic_name__ )
__snake_case : Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __magic_name__ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__snake_case : int = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
__snake_case : str = self.default_image_processor
__snake_case : Optional[Any] = prepare_img()
__snake_case : Dict = image_processor(images=__magic_name__ , return_tensors="""pt""" )
__snake_case : Dict = inputs.pixel_values.to(__magic_name__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__snake_case : List[Any] = model(__magic_name__ )
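# Worked check of the dino-vits8 shape asserted above: a 480x480 input with
# 8x8 patches yields (480 // 8) ** 2 = 3600 patch tokens plus one [CLS]
# token, and ViT-S has hidden size 384, hence the (1, 3601, 384) shape.
assert (480 // 8) ** 2 + 1 == 3601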
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
    """simple docstring"""
    if not isinstance(_lowerCamelCase , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_str = str(abs(_lowerCamelCase ) )
        num_transpositions = [list(num_str ) for _ in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 1 |
'''simple docstring'''
class TrieNode:
    def __init__(self) -> None:
        """simple docstring"""
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list[str]) -> None:
        """simple docstring"""
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        """simple docstring"""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find(self, word: str) -> bool:
        """simple docstring"""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        """simple docstring"""
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self, word, 0)
def print_words(node, word) -> None:
    """simple docstring"""
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie() -> bool:
    """simple docstring"""
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def print_results(msg, passes) -> None:
    """simple docstring"""
    print(str(msg ) , """works!""" if passes else """doesn't work :(""" )
def _a ( ) -> None:
"""simple docstring"""
assert test_trie()
def main() -> None:
"""simple docstring"""
print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
| 26 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth , node_index , is_max , scores , height) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
return (
max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
if is_max
else min(
            minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
)
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ) , 2 )
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
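# Worked check for minimax above: with scores [3, 5, 2, 9] and height
# log2(4) = 2, the maximizer picks the larger of min(3, 5) = 3 and
# min(2, 9) = 2, so the optimal value is 3.
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3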
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        """simple docstring"""
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty(self) -> bool:
        """simple docstring"""
        return self.head == self.tail
    def push(self, data: Any) -> None:
        """simple docstring"""
        self.data.append(data)
        self.tail = self.tail + 1
    def pop(self) -> Any:
        """simple docstring"""
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self) -> int:
        """simple docstring"""
        return self.tail - self.head
    def print_queue(self) -> None:
        """simple docstring"""
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        """simple docstring"""
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data(self) -> Any:
        """simple docstring"""
        return self.data
    def get_left(self) -> MyNode | None:
        """simple docstring"""
        return self.left
    def get_right(self) -> MyNode | None:
        """simple docstring"""
        return self.right
    def get_height(self) -> int:
        """simple docstring"""
        return self.height
    def set_data(self, data: Any) -> None:
        """simple docstring"""
        self.data = data
    def set_left(self, node: MyNode | None) -> None:
        """simple docstring"""
        self.left = node
    def set_right(self, node: MyNode | None) -> None:
        """simple docstring"""
        self.right = node
    def set_height(self, height: int) -> None:
        """simple docstring"""
        self.height = height
def get_height(node) -> int:
    """simple docstring"""
    if node is None:
        return 0
    return node.get_height()
def my_max(a, b) -> int:
    """simple docstring"""
    if a > b:
        return a
    return b
def right_rotation(node) -> MyNode:
    """simple docstring"""
    print("right rotation node:" , node.get_data() )
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right() )
    ret.set_right(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def left_rotation(node) -> MyNode:
    """simple docstring"""
    print("left rotation node:" , node.get_data() )
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left() )
    ret.set_left(node )
    h1 = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h1 )
    h2 = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
    ret.set_height(h2 )
    return ret
def lr_rotation(node) -> MyNode:
    """simple docstring"""
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child ) )
    return right_rotation(node )
def rl_rotation(node) -> MyNode:
    """simple docstring"""
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child ) )
    return left_rotation(node )
def insert_node(node, data) -> MyNode | None:
    """simple docstring"""
    if node is None:
        return MyNode(data )
    if data < node.get_data():
        node.set_left(insert_node(node.get_left() , data ) )
        if (
            get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ): # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ): # new node is the left child of the left child
                node = right_rotation(node )
            else:
                node = lr_rotation(node )
    else:
        node.set_right(insert_node(node.get_right() , data ) )
        if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node )
            else:
                node = left_rotation(node )
    h = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
    node.set_height(h )
    return node
def get_right_most(root) -> Any:
    """simple docstring"""
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root) -> Any:
    """simple docstring"""
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data) -> MyNode | None:
    """simple docstring"""
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child )
            root.set_data(temp_data )
            root.set_right(del_node(right_child , temp_data ) )
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("""No such data""" )
            return root
        else:
            root.set_left(del_node(left_child , data ) )
    else: # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child , data ) )
    if get_height(right_child ) - get_height(left_child ) == 2:
        assert right_child is not None
        if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
            root = left_rotation(root )
        else:
            root = rl_rotation(root )
    elif get_height(right_child ) - get_height(left_child ) == -2:
        assert left_child is not None
        if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
            root = right_rotation(root )
        else:
            root = lr_rotation(root )
    h = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
    root.set_height(h )
    return root
class AVLtree:
    def __init__(self) -> None:
        """simple docstring"""
        self.root: MyNode | None = None
    def get_height(self) -> int:
        """simple docstring"""
        return get_height(self.root )
    def insert(self, data: Any) -> None:
        """simple docstring"""
        print("insert:" + str(data ) )
        self.root = insert_node(self.root , data )
    def del_node(self, data: Any) -> None:
        """simple docstring"""
        print("delete:" + str(data ) )
        if self.root is None:
            print("""Tree is empty!""" )
            return
        self.root = del_node(self.root , data )
    def __str__(self) -> str: # a level traversal gives a more intuitive look on the tree
        """simple docstring"""
        output = """"""
        q = MyQueue()
        q.push(self.root )
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = """ """ * int(math.pow(2 , layer - 1 ) )
            output += space
            if node is None:
                output += "*"
                q.push(None )
                q.push(None )
            else:
                output += str(node.get_data() )
                q.push(node.get_left() )
                q.push(node.get_right() )
            output += space
            cnt = cnt + 1
            for i in range(1_00 ):
                if cnt == math.pow(2 , i ) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
"""simple docstring"""
import doctest
doctest.testmod()
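# Hedged usage sketch for the AVLtree above (`_t` and `_v` are illustrative
# names): inserting 1, 2, 3 in order triggers a left rotation, so the tree
# stays at height 2 instead of degenerating into a height-3 chain.
_t = AVLtree()
for _v in (1, 2, 3):
    _t.insert(_v)
assert _t.get_height() == 2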
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 26 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence , start = None , end = None) -> None:
    """simple docstring"""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
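# Hedged usage sketch: slowsort above sorts the list in place
# (`_seq` is an illustrative name).
_seq = [5, 1, 4, 2]
slowsort(_seq)
assert _seq == [1, 2, 4, 5]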
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _a ( _lowerCamelCase="" ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = tempfile.mkdtemp()
return os.path.join(_lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__snake_case : Tuple = AgentAudio(__magic_name__ )
__snake_case : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__magic_name__ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(__magic_name__ ) )
# Ensure that the file contains the same value as the original tensor
__snake_case , __snake_case : Optional[Any] = sf.read(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , torch.tensor(__magic_name__ ) , atol=1E-4 ) )
def lowercase__ ( self : Tuple ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
__snake_case : Tuple = get_new_path(suffix=""".wav""" )
sf.write(__magic_name__ , __magic_name__ , 1_60_00 )
__snake_case : int = AgentAudio(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , __magic_name__ )
@require_vision
@require_torch
class _A ( unittest.TestCase ):
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__snake_case : int = torch.randint(0 , 2_56 , (64, 64, 3) )
__snake_case : List[str] = AgentImage(__magic_name__ )
__snake_case : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(__magic_name__ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__magic_name__ ) )
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__snake_case : Tuple = Image.open(__magic_name__ )
__snake_case : Optional[int] = AgentImage(__magic_name__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__magic_name__ ) )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__snake_case : List[str] = Image.open(__magic_name__ )
__snake_case : Union[str, Any] = AgentImage(__magic_name__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(__magic_name__ ) )
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
__snake_case : List[str] = """Hey!"""
__snake_case : Optional[Any] = AgentText(__magic_name__ )
self.assertEqual(__magic_name__ , agent_type.to_string() )
self.assertEqual(__magic_name__ , agent_type.to_raw() )
self.assertEqual(__magic_name__ , __magic_name__ )
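# Hedged usage sketch for get_new_path above (`_path` is an illustrative
# name): it returns a unique file path inside a fresh temporary directory.
_path = get_new_path(suffix=".wav")
assert _path.endswith(".wav")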
| 26 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
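# A minimal, self-contained sketch of the pattern exercised above: build a
# DocTestSuite for a module and run it with unittest's text runner.
# `_sketch_mod` is an illustrative stand-in for a real transformers module.
import types

_sketch_mod = types.ModuleType("_sketch_mod")
_sketch_mod.__doc__ = """
>>> 1 + 1
2
"""
_suite = doctest.DocTestSuite(_sketch_mod)
_result = unittest.TextTestRunner().run(_suite)
assert len(_result.failures) == 0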
| 26 | 1 |
'''simple docstring'''
def naive_cut_rod_recursive(n, prices):
    """simple docstring"""
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revue = float("""-inf""" )
    for i in range(1 , n + 1 ):
        max_revue = max(
            max_revue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revue
def top_down_cut_rod(n, prices):
    """simple docstring"""
    _enforce_args(n , prices )
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive(n, prices, max_rev) -> int:
    """simple docstring"""
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("""-inf""" )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
        return max_rev[n]
def bottom_up_cut_rod(n, prices):
    """simple docstring"""
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("""-inf""" ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n, prices) -> None:
    """simple docstring"""
    if n < 0:
        msg = F'''n must be greater than or equal to 0. Got n = {n}'''
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            """Each integral piece of rod must have a corresponding price. """
            F'''Got n = {n} but length of prices = {len(prices )}'''
        )
        raise ValueError(msg )
def main() -> None:
    """simple docstring"""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
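# Worked sketch: for prices [1, 5, 8, 9], a rod of length 4 is best cut into
# two pieces of length 2, for a revenue of 5 + 5 = 10.
assert bottom_up_cut_rod(4, [1, 5, 8, 9]) == 10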
if __name__ == "__main__":
main()
| 26 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : str , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
__snake_case : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
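# A self-contained sketch of the classifier-free guidance update used above:
# the unconditional prediction is pushed toward the text-conditioned one,
# scaled by the guidance weight (illustrative tensors and scale).
_uncond = torch.zeros(4)
_text = torch.ones(4)
_guided = _uncond + 7.5 * (_text - _uncond)
assert torch.allclose(_guided, torch.full((4,), 7.5))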
| 26 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__UpperCamelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] , __magic_name__ : Path , __magic_name__ : Union[str, None] = None , __magic_name__ : Union[List[str], None] = None , __magic_name__ : Union[str, List[str], None] = None , __magic_name__ : bool = True , ) -> Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = [file for file in os.listdir(__magic_name__ ) if os.path.isfile(os.path.join(__magic_name__ , __magic_name__ ) )]
if identifier is not None:
__snake_case : List[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__magic_name__ , __magic_name__ ):
for n_ in n_identifier:
__snake_case : Optional[int] = [file for file in files if n_ not in file]
else:
__snake_case : Tuple = [file for file in files if n_identifier not in file]
__snake_case : Dict = ignore_files or []
ignore_files.append("""__init__.py""" )
__snake_case : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , __magic_name__ )
if only_modules:
__snake_case : List[Any] = file.split(""".""" )[0]
try:
__snake_case : List[Any] = getattr(__magic_name__ , __magic_name__ )
__snake_case : Union[str, Any] = doctest.DocTestSuite(__magic_name__ )
__snake_case : Dict = unittest.TextTestRunner().run(__magic_name__ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
__snake_case : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[Any] = """modeling"""
__snake_case : Union[str, Any] = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(__magic_name__ , identifier=__magic_name__ , ignore_files=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = Path("""src/transformers""" )
__snake_case : Any = """tokenization"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__snake_case : List[Any] = Path("""src/transformers""" )
__snake_case : List[str] = """configuration"""
self.analyze_directory(__magic_name__ , identifier=__magic_name__ )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = Path("""src/transformers""" )
__snake_case : int = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(__magic_name__ , n_identifier=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : int = Path("""docs/source""" )
__snake_case : Optional[int] = ["""favicon.ico"""]
self.analyze_directory(__magic_name__ , ignore_files=__magic_name__ , only_modules=__magic_name__ )
| 26 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"  # constant name assumed; only the value survives in the original
| 26 | 1 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """simple docstring"""
    def decorator(func):
        handle = getattr(func , """handle_key""" , [] )
        handle += [key]
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
def mark_multiple(*keys):
    """simple docstring"""
    def decorator(func):
        handle = getattr(func , """handle_key""" , [] )
        handle += keys
        setattr(func , """handle_key""" , handle )
        return func
    return decorator
class KeyHandler(type):
    def __new__(cls , name , bases , attrs):
        """simple docstring"""
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        """simple docstring"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
            handler = cls.key_handler.get(char )
            if handler:
                cls.current_selection = char  # attribute name assumed; mirrors the upstream menu handler
                return handler(cls )
            else:
                return None
def register(cls):  # function name assumed from the upstream helper
    """simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
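# A minimal usage sketch for `mark` above (`_quit` is an illustrative name):
# the decorator only records which key a function answers to; the KeyHandler
# metaclass wires it into key_handler afterwards.
@mark("q")
def _quit(cls):
    return "quit"

assert _quit.handle_key == ["q"]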
| 26 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """simple docstring"""
    config = MobileNetVaConfig(layer_norm_eps=0.0_01 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    matches = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
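# Worked check of the model-name parsing above: the regex splits the depth
# multiplier and image size out of a checkpoint name.
_m = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
assert _m is not None and float(_m[1]) == 0.75 and int(_m[2]) == 192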
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False):
    """simple docstring"""
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 26 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _A :
def __init__( self : Any , __magic_name__ : Dict , __magic_name__ : Optional[Any]=2 , __magic_name__ : List[Any]=True , __magic_name__ : Optional[Any]=False , __magic_name__ : Optional[int]=10 , __magic_name__ : Optional[int]=3 , __magic_name__ : Optional[int]=32 * 4 , __magic_name__ : int=32 * 6 , __magic_name__ : List[Any]=4 , __magic_name__ : List[Any]=32 , ) -> Dict:
"""simple docstring"""
__snake_case : str = parent
__snake_case : int = batch_size
__snake_case : int = is_training
__snake_case : Tuple = use_auxiliary_loss
__snake_case : List[str] = num_queries
__snake_case : str = num_channels
__snake_case : Dict = min_size
__snake_case : int = max_size
__snake_case : List[Any] = num_labels
__snake_case : Union[str, Any] = mask_feature_size
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__magic_name__ )
__snake_case : Dict = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ )
__snake_case : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5
).float()
__snake_case : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long()
__snake_case : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = self.prepare_config_and_inputs()
__snake_case : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = output.encoder_hidden_states
__snake_case : List[str] = output.pixel_decoder_hidden_states
__snake_case : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , config.decoder_config.decoder_layers )
def lowercase__ ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Tuple=False ) -> int:
"""simple docstring"""
with torch.no_grad():
__snake_case : str = MaskFormerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[Any] = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
__snake_case : int = model(__magic_name__ , output_hidden_states=__magic_name__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = MaskFormerForInstanceSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
def comm_check_on_output(__magic_name__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__snake_case : Tuple = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
__snake_case : List[Any] = model(__magic_name__ )
comm_check_on_output(__magic_name__ )
__snake_case : List[Any] = model(
pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
comm_check_on_output(__magic_name__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
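# Worked check of the mask shape asserted above: with min_size = 32 * 4 and
# max_size = 32 * 6, the 4x spatial compression of the encoder gives
# 32 x 48 mask logits per query.
assert ((32 * 4) // 4, (32 * 6) // 4) == (32, 48)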
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowercase__: Tuple = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowercase__: Optional[Any] = False
lowercase__: Union[str, Any] = False
lowercase__: Optional[int] = False
lowercase__: Dict = False
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = MaskFormerModelTester(self )
__snake_case : Optional[int] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__magic_name__ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(__magic_name__ )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
@slow
def lowercase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
__snake_case : str = MaskFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Any = (self.model_tester.min_size,) * 2
__snake_case : List[Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__magic_name__ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__magic_name__ ),
"""class_labels""": torch.zeros(2 , 10 , device=__magic_name__ ).long(),
}
__snake_case : str = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__magic_name__ )
__snake_case : Optional[Any] = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(__magic_name__ ).to(__magic_name__ )
__snake_case : Any = model(**__magic_name__ , output_attentions=__magic_name__ )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__snake_case : Dict = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs()
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
__snake_case : Dict = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
loss.backward()
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : Tuple = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
__snake_case : Any = True
__snake_case : str = True
__snake_case : List[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
__snake_case : str = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
__snake_case : Dict = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__snake_case : Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__snake_case : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__snake_case : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__magic_name__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
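# Editorial sketch (not part of the test class above): the retain_grad pattern
# exercised in the last test, in isolation. PyTorch only populates .grad on
# leaf tensors by default, so intermediate activations need an explicit
# retain_grad() call before backward().
def _retain_grad_sketch() -> None:
    x = torch.randn(4, 3, requires_grad=True)
    hidden = (x * 2.0).relu()  # non-leaf intermediate tensor
    hidden.retain_grad()  # without this, hidden.grad would stay None
    hidden.sum().backward()
    assert hidden.grad is not None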
__UpperCamelCase = 1E-4
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__magic_name__ )
__snake_case : List[str] = self.default_image_processor
__snake_case : Optional[int] = prepare_img()
__snake_case : Optional[int] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
__snake_case : str = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__snake_case : Dict = model(**__magic_name__ )
__snake_case : Tuple = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
__snake_case : Optional[Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
__snake_case : Dict = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__magic_name__ )
.eval()
)
__snake_case : List[Any] = self.default_image_processor
__snake_case : List[str] = prepare_img()
__snake_case : Tuple = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
__snake_case : Optional[int] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
# masks_queries_logits
__snake_case : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case : Optional[int] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
__snake_case : Optional[Any] = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
__snake_case : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case : Optional[int] = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__magic_name__ )
.eval()
)
__snake_case : str = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[str] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
__snake_case : Dict = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__snake_case : str = model(**__magic_name__ )
# masks_queries_logits
__snake_case : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case : List[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
__snake_case : Union[str, Any] = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
__snake_case : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case : Dict = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__magic_name__ )
.eval()
)
__snake_case : List[str] = self.default_image_processor
__snake_case : str = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__snake_case : List[str] = inputs["""pixel_values"""].to(__magic_name__ )
__snake_case : Optional[Any] = [el.to(__magic_name__ ) for el in inputs["""mask_labels"""]]
__snake_case : Dict = [el.to(__magic_name__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__snake_case : Any = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
| 26 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 1 |
'''simple docstring'''
from math import factorial
def _a ( _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(_lowerCamelCase ) // (factorial(_lowerCamelCase ) * factorial(n - k ))
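# Editorial sketch: an equivalent multiplicative formulation that avoids large
# intermediate factorials; each floor division below is exact because every
# prefix product is itself a binomial coefficient.
def _combinations_iterative(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    result = 1
    for i in range(1, k + 1):
        result = result * (n - k + i) // i
    return result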
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
| 26 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
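# Editorial sketch: in the binary case the MCC described above reduces to the
# closed form (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)); this
# cross-checks it against sklearn on a small hand-built example.
def _mcc_sketch() -> None:
    from math import sqrt

    references = [1, 1, 0, 0, 1, 0]
    predictions = [1, 0, 0, 1, 1, 0]
    tp = sum(r == 1 and p == 1 for r, p in zip(references, predictions))
    tn = sum(r == 0 and p == 0 for r, p in zip(references, predictions))
    fp = sum(r == 0 and p == 1 for r, p in zip(references, predictions))
    fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))
    manual = (tp * tn - fp * fn) / sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # both sides evaluate to 1/3 for this example
    assert abs(manual - matthews_corrcoef(references, predictions)) < 1e-9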
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {
"configuration_longformer": [
"LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LongformerConfig",
"LongformerOnnxConfig",
],
"tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongformerForMaskedLM",
"LongformerForMultipleChoice",
"LongformerForQuestionAnswering",
"LongformerForSequenceClassification",
"LongformerForTokenClassification",
"LongformerModel",
"LongformerPreTrainedModel",
"LongformerSelfAttention",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLongformerForMaskedLM",
"TFLongformerForMultipleChoice",
"TFLongformerForQuestionAnswering",
"TFLongformerForSequenceClassification",
"TFLongformerForTokenClassification",
"TFLongformerModel",
"TFLongformerPreTrainedModel",
"TFLongformerSelfAttention",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
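# Editorial note: the _LazyModule pattern above defers the heavy torch/tf
# imports until an attribute is first accessed. A minimal stand-alone sketch
# of the same idea, using a PEP 562 module-level __getattr__ (the names below
# are illustrative, not the transformers internals):
#
#     import importlib
#
#     _LAZY = {"LongformerTokenizerFast": ".tokenization_longformer_fast"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")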
| 26 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
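# Editorial note: the cache layout asserted above comes from
# hash_url_to_filename(url, etag=...), which derives each cached file's name
# from a sha256-style hash of the URL (plus the ETag when one is present); the
# HASH constant at the top of the file is the expected value for the mock URL.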
| 26 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
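# Editorial note: the index assembled above follows the standard sharded-
# checkpoint layout, i.e. a JSON object of the form
#   {"metadata": {"total_size": <bytes>},
#    "weight_map": {"<parameter name>": "<shard file>.bin", ...}}
# which is how from_pretrained locates the shard holding each tensor.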
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase = 100 ) -> int:
"""simple docstring"""
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
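# Editorial sketch: brute-force check of the closed forms used above,
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; for n = 100 both this and
# solution() return 25164150.
def _brute_force(n: int = 100) -> int:
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)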
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__UpperCamelCase = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__UpperCamelCase = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__UpperCamelCase = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[Any] ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def lowercase__ ( self : Dict , __magic_name__ : List[List[List[str]]] , __magic_name__ : List[List[str]] , __magic_name__ : int = 1 , __magic_name__ : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__magic_name__ , hypotheses=__magic_name__ , min_len=__magic_name__ , max_len=__magic_name__ )
}
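# Editorial sketch: the metric above is a thin wrapper around NLTK, so calling
# gleu_score.corpus_gleu directly on the two sentence pairs from Example 1 of
# the docstring reproduces the documented score of 0.44.
def _gleu_sketch() -> float:
    hyp1 = "It is a guide to action which ensures that the rubber duck always disobeys the commands of the cat".split()
    ref1a = "It is the guiding principle which guarantees the rubber duck forces never being under the command of the cat".split()
    hyp2 = "he read the book because he was interested in world history".split()
    ref2a = "he was interested in world history because he read the book".split()
    return gleu_score.corpus_gleu(list_of_references=[[ref1a], [ref2a]], hypotheses=[hyp1, hyp2])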
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = row, column
__snake_case : Dict = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
        __snake_case : Dict = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__snake_case : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__snake_case : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
__snake_case : str = f'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ : list[float] ) -> str:
nonlocal string_format_identifier
__snake_case : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self )
def lowercase__ ( self : Dict , __magic_name__ : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , __magic_name__ : tuple[int, int] ) -> Any:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
__snake_case : Optional[int] = value
def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
__snake_case : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) -> Matrix:
"""simple docstring"""
__snake_case : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = -self[r, c]
return result
def __sub__( self : Optional[int] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : List[Any] , __magic_name__ : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
__snake_case : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : Tuple = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
__snake_case : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__snake_case : Optional[int] = f'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
__snake_case : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : str = self[r, c]
return result
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
__snake_case : List[str] = v.transpose()
__snake_case : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
            return None  # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
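# Editorial cross-check of the identity implemented above (the method is
# called on A^(-1)): (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u).
def _sherman_morrison_numpy_check() -> bool:
    import numpy as np

    a = np.diag([2.0, 3.0, 4.0])
    u = np.array([[1.0], [2.0], [-3.0]])
    v = np.array([[4.0], [-2.0], [5.0]])
    a_inv = np.linalg.inv(a)
    updated = a_inv - (a_inv @ u @ v.T @ a_inv) / (1.0 + (v.T @ a_inv @ u).item())
    return bool(np.allclose(updated, np.linalg.inv(a + u @ v.T)))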
# Testing
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
__snake_case : Any = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__snake_case : Dict = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Union[str, Any] = 1, 2, -3
__snake_case : str = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}''' )
def _a ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 26 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _A ( unittest.TestCase ):
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = jnp.ones((batch_size, length) ) / length
return scores
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = None
__snake_case : Tuple = 20
__snake_case : Dict = self._get_uniform_logits(batch_size=2 , length=__magic_name__ )
# tweak scores to not be uniform anymore
__snake_case : int = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__snake_case : Optional[int] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__snake_case : str = jax.nn.softmax(__magic_name__ , axis=-1 )
__snake_case : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3 )
__snake_case : Optional[Any] = jax.nn.softmax(temp_dist_warper_sharper(__magic_name__ , scores.copy() , cur_len=__magic_name__ ) , axis=-1 )
__snake_case : Optional[Any] = jax.nn.softmax(temp_dist_warper_smoother(__magic_name__ , scores.copy() , cur_len=__magic_name__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : int = None
__snake_case : List[str] = 10
__snake_case : List[Any] = 2
# create ramp distribution
__snake_case : Optional[Any] = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy()
__snake_case : Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
__snake_case : List[str] = FlaxTopKLogitsWarper(3 )
__snake_case : Dict = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__snake_case : Any = 5
__snake_case : Dict = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__snake_case : Any = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, length) ).copy()
__snake_case : List[str] = top_k_warp_safety_check(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
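    # Editorial note on the next test: top-p ("nucleus") filtering keeps the
    # smallest set of highest-probability tokens whose cumulative mass reaches
    # p and sets all remaining logits to filter_value (-inf by default), so
    # exp(-inf) = 0 explains the zeroed entries asserted below.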
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = None
__snake_case : Any = 10
__snake_case : List[str] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__snake_case : Any = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__snake_case : Any = FlaxTopPLogitsWarper(0.8 )
__snake_case : Tuple = np.exp(top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__snake_case : Any = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__snake_case : Optional[Any] = np.broadcast_to(np.arange(__magic_name__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__snake_case : Optional[int] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__snake_case : Union[str, Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__snake_case : Union[str, Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = 20
__snake_case : Tuple = 4
__snake_case : List[Any] = 0
__snake_case : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
# check that min length is applied at length 5
__snake_case : Union[str, Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
__snake_case : int = 5
__snake_case : Dict = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__snake_case : str = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = 15
__snake_case : str = min_dist_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Dict = 20
__snake_case : List[str] = 4
__snake_case : Tuple = 0
__snake_case : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
# check that all scores are -inf except the bos_token_id score
__snake_case : List[Any] = ids_tensor((batch_size, 1) , vocab_size=20 )
__snake_case : Optional[int] = 1
__snake_case : Tuple = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : List[str] = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
__snake_case : Any = 3
__snake_case : Union[str, Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
__snake_case : List[Any] = 20
__snake_case : Union[str, Any] = 4
__snake_case : Dict = 0
__snake_case : Optional[int] = 5
__snake_case : List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
__snake_case : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=20 )
__snake_case : int = 4
__snake_case : Any = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Dict = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__snake_case : Optional[Any] = 3
__snake_case : Optional[Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : Any = logits_processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
self.assertFalse(jnp.isinf(__magic_name__ ).any() )
def lowercase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = 4
__snake_case : int = 10
__snake_case : List[Any] = 15
__snake_case : Union[str, Any] = 2
__snake_case : List[str] = 1
__snake_case : Optional[Any] = 15
# dummy input_ids and scores
__snake_case : Dict = ids_tensor((batch_size, sequence_length) , __magic_name__ )
__snake_case : Any = input_ids.copy()
__snake_case : List[Any] = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : str = scores.copy()
# instantiate all dist processors
__snake_case : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : Union[str, Any] = FlaxTopKLogitsWarper(3 )
__snake_case : List[str] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
__snake_case : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
__snake_case : str = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
__snake_case : Tuple = 10
# no processor list
__snake_case : Dict = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Any = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : str = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[int] = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[Any] = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Optional[Any] = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# with processor list
__snake_case : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : int = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
# scores should be equal
self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = 4
__snake_case : Optional[int] = 10
__snake_case : Optional[Any] = 15
__snake_case : Dict = 2
__snake_case : Tuple = 1
__snake_case : Optional[int] = 15
# dummy input_ids and scores
__snake_case : List[Any] = ids_tensor((batch_size, sequence_length) , __magic_name__ )
__snake_case : str = input_ids.copy()
__snake_case : str = self._get_uniform_logits(__magic_name__ , __magic_name__ )
__snake_case : int = scores.copy()
# instantiate all dist processors
__snake_case : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__snake_case : Optional[Any] = FlaxTopKLogitsWarper(3 )
__snake_case : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__snake_case : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=__magic_name__ )
__snake_case : Union[str, Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=__magic_name__ )
__snake_case : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=__magic_name__ , eos_token_id=__magic_name__ )
__snake_case : Dict = 10
# no processor list
def run_no_processor_list(__magic_name__ : str , __magic_name__ : List[str] , __magic_name__ : int ):
__snake_case : Union[str, Any] = temp_dist_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Tuple = top_k_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[Any] = top_p_warp(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : Union[str, Any] = min_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[Any] = bos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
__snake_case : List[str] = eos_dist_proc(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
return scores
# with processor list
def run_processor_list(__magic_name__ : int , __magic_name__ : Any , __magic_name__ : Dict ):
__snake_case : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__snake_case : Optional[Any] = processor(__magic_name__ , __magic_name__ , cur_len=__magic_name__ )
return scores
__snake_case : int = jax.jit(__magic_name__ )
__snake_case : List[Any] = jax.jit(__magic_name__ )
__snake_case : List[str] = jitted_run_no_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : Tuple = jitted_run_processor_list(__magic_name__ , __magic_name__ , __magic_name__ )
# scores should be equal
self.assertTrue(jnp.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 26 |
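# A minimal usage sketch of the warper-chaining pattern the tests above exercise,
# assuming the public Flax generation API in `transformers` (the obfuscated names in
# the snippet correspond to these classes); shapes and values are illustrative.
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

input_ids = jnp.zeros((2, 4), dtype="i4")   # dummy prompt: batch of 2, length 4
scores = jnp.zeros((2, 20))                 # uniform logits over a 20-token vocabulary
processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.5), FlaxTopKLogitsWarper(3)])
warped = processors(input_ids, scores, cur_len=4)  # everything but the top-3 logits becomes -inf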
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 1 |
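# Hedged sketch of the sharded-checkpoint index the conversion script above assembles:
# a metadata block with the total byte size plus a parameter-name -> shard-file map.
# The weight names and size below are hypothetical placeholders.
import json

index = {
    "metadata": {"total_size": 123_456_789},  # sum of numel * dtype_byte_size over all tensors
    "weight_map": {
        "ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "shared.weight": "pytorch_model-00002-of-00002.bin",
    },
}
print(json.dumps(index, indent=2, sort_keys=True))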
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
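# A minimal sketch of the lazy-import idea behind `_LazyModule`, using PEP 562's
# module-level __getattr__ instead of the real transformers machinery: attribute
# access triggers the import, so heavy backends load only when first used.
import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}  # toy submodule -> names map

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")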
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
__snake_case : List[Any] = 0.04
__snake_case : Tuple = self.window_size // 2
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
# Threshold on the Harris response; tune this value for your image
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 | 1 |
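# For comparison, OpenCV ships a vectorized Harris detector; a hedged sketch of the
# equivalent call (it uses Sobel derivatives rather than np.gradient, so the response
# map differs slightly from the nested loops above; the image path is a placeholder).
import cv2
import numpy as np

gray = cv2.imread("path_to_image", 0).astype(np.float32)
response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)   # 3x3 window, k as above
corners = np.argwhere(response > 0.01 * response.max())           # threshold is a tunable choice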
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _A ( __lowercase ):
lowercase__: Tuple = '''trocr'''
lowercase__: Tuple = ['''past_key_values''']
lowercase__: Tuple = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self : Dict , __magic_name__ : List[Any]=5_02_65 , __magic_name__ : Tuple=10_24 , __magic_name__ : List[Any]=12 , __magic_name__ : int=16 , __magic_name__ : Optional[Any]=40_96 , __magic_name__ : str="gelu" , __magic_name__ : Optional[int]=5_12 , __magic_name__ : int=0.1 , __magic_name__ : List[Any]=0.0 , __magic_name__ : str=0.0 , __magic_name__ : Dict=2 , __magic_name__ : Dict=0.02 , __magic_name__ : Optional[Any]=0.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=False , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[str]=1 , __magic_name__ : str=0 , __magic_name__ : Dict=2 , **__magic_name__ : List[str] , ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = vocab_size
__snake_case : Union[str, Any] = d_model
__snake_case : List[Any] = decoder_layers
__snake_case : Optional[Any] = decoder_attention_heads
__snake_case : Optional[int] = decoder_ffn_dim
__snake_case : Optional[int] = activation_function
__snake_case : Dict = max_position_embeddings
__snake_case : Optional[int] = dropout
__snake_case : str = attention_dropout
__snake_case : List[str] = activation_dropout
__snake_case : str = init_std
__snake_case : List[str] = decoder_layerdrop
__snake_case : Any = use_cache
__snake_case : int = scale_embedding
__snake_case : List[Any] = use_learned_position_embeddings
__snake_case : int = layernorm_embedding
super().__init__(
pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
| 26 |
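# Hedged usage sketch: the class above mirrors `TrOCRConfig` from `transformers`.
# With the real class, the attribute_map shown routes the generic names onto the
# decoder hyper-parameters (values below are the documented defaults).
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=1024, decoder_layers=12, decoder_attention_heads=16)
assert config.hidden_size == 1024  # attribute_map resolves hidden_size -> d_model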
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 26 | 1 |
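# Hedged usage sketch of the paired tokenizer/image-processor pattern above, using the
# real CLIP classes (the checkpoint name is illustrative).
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
# -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor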
'''simple docstring'''
import functools
from typing import Any
def _a ( string , words ) -> bool:
    """simple docstring"""
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("""the string should be not empty string""" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("""the words should be a list of non-empty strings""" )
    # Build trie
    trie : dict[str, Any] = {}
    word_keeper_key : str = """WORD_KEEPER"""
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
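# Worked examples for the trie-based word-break check above: "applepenapple" splits
# into "apple" + "pen" + "apple", while "catsandog" has no full segmentation.
assert _a("applepenapple", ["apple", "pen"])
assert not _a("catsandog", ["cats", "dog", "sand", "and", "cat"])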
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(i )] for i in I[0]]
return nn_examples
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
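# A minimal sketch of the dense-retrieval core the app wires together: embed queries
# and passages into the same 128-d space, then run max-inner-product search in faiss.
# Random vectors stand in for the memmapped passage representations.
import faiss
import numpy as np

passage_reps = np.random.rand(1000, 128).astype("float32")
index = faiss.IndexFlatIP(128)
index.add(passage_reps)
query = np.random.rand(1, 128).astype("float32")
scores, ids = index.search(query, 10)  # top-10 passage ids by inner product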
'''simple docstring'''
def _a ( input_str ) -> str:
    """simple docstring"""
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
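# Worked example for the word-reversal helper above:
assert _a("Hello World") == "World Hello"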
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    config_model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(config_model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(config_model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__UpperCamelCase = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 26 |
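# Hedged invocation sketch for the converter above (the script file name and the
# paths are placeholders; the flags match the argparse definitions in the snippet):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --gpt2_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output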
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def _a ( _lowerCamelCase ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret : set[int] = set()
    prime : int
    sub : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions = 5000 ) -> int | None:
    """simple docstring"""
    for number_to_partition in range(1 , number_unique_partitions ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 |
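# Worked check for the prime-partition counter above: 10 has five partitions into
# primes (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5), and unique factorization maps each
# to a distinct product, so the returned set has five elements.
assert partition(10) == {32, 36, 30, 21, 25}
assert len(partition(10)) == 5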
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 1 |
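# Hedged mini-demo of the substitution performed by the release script above, run on
# the "init" pattern in isolation:
import re

pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
code = '__version__ = "4.30.0.dev0"\n'
print(pattern.sub('__version__ = "4.30.0"', code))  # -> __version__ = "4.30.0"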
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _A :
def __init__( self : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any]=None , __magic_name__ : Dict=None , __magic_name__ : List[str]=None , __magic_name__ : int="resnet50" , __magic_name__ : Dict=3 , __magic_name__ : Union[str, Any]=32 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : Dict=True , __magic_name__ : List[str]=True , ) -> Tuple:
"""simple docstring"""
__snake_case : Union[str, Any] = parent
__snake_case : List[Any] = out_indices if out_indices is not None else [4]
__snake_case : List[Any] = stage_names
__snake_case : Tuple = out_features
__snake_case : Any = backbone
__snake_case : List[str] = batch_size
__snake_case : int = image_size
__snake_case : Tuple = num_channels
__snake_case : Optional[int] = use_pretrained_backbone
__snake_case : Any = is_training
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : List[Any] = self.get_config()
return config, pixel_values
def lowercase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = TimmBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(__magic_name__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__snake_case : int = self.prepare_config_and_inputs()
__snake_case , __snake_case : Dict = config_and_inputs
__snake_case : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _A ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
lowercase__: str = (TimmBackbone,) if is_torch_available() else ()
lowercase__: Optional[int] = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
lowercase__: Tuple = False
lowercase__: List[str] = False
lowercase__: Any = False
lowercase__: Optional[int] = False
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : Dict = TimmBackboneModelTester(self )
__snake_case : Tuple = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = """resnet18"""
__snake_case : Union[str, Any] = """microsoft/resnet-18"""
__snake_case : Union[str, Any] = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ )
__snake_case : Any = AutoBackbone.from_pretrained(__magic_name__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__snake_case : Union[str, Any] = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ , out_indices=[1, 2, 3] )
__snake_case : Tuple = AutoBackbone.from_pretrained(__magic_name__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowercase__ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowercase__ ( self : str ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowercase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowercase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowercase__ ( self : str ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowercase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : int ) -> Dict:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
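
# Standalone sketch (illustrative names, not part of the test suite above) of
# the inspect.signature check used in test_forward_signature: the first
# positional parameter of a vision model's forward should be `pixel_values`.
if __name__ == "__main__":

    def forward(pixel_values, output_hidden_states=None):  # hypothetical forward
        ...

    arg_names = [*inspect.signature(forward).parameters.keys()]
    assert arg_names[:1] == ["pixel_values"]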
| 26 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
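
# Standalone illustration of the row/column equivalence the tests above assert
# (assumes the `datasets` package is installed; runs only when executed directly).
if __name__ == "__main__":
    records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
    dset_rows = Dataset.from_list(records)
    dset_cols = Dataset.from_dict({k: [r[k] for r in records] for k in records[0]})
    assert dset_rows.column_names == dset_cols.column_names == ["col_1", "col_2"]
    assert dset_rows[0] == dset_cols[0] == {"col_1": 3, "col_2": "a"}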
| 26 | 1 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of `successes` successes in `trials` Bernoulli trials."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 0 - 1")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
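    # Added sanity check: across all possible success counts the PMF must sum
    # to 1 for any 0 < p < 1.
    total = sum(binomial_distribution(k, 10, 0.75) for k in range(11))
    assert abs(total - 1.0) < 1e-9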
| 26 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
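
# Added cross-check against the standard library: a sequence of length n has
# exactly n! permutations, matching the number of lines printed above.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24
assert len(list(permutations(["A", "B", "C"]))) == 6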
| 26 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
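
    # The heuristic built above is the Manhattan distance to the goal, which
    # is admissible for unit-cost 4-connected moves; for this 5x6 grid the
    # start cell is 9 moves from the goal.
    assert abs(init[0] - goal[0]) + abs(init[1] - goal[1]) == 9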
| 26 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
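
# Usage sketch: thanks to the attribute_map above, the generic attribute names
# resolve to the GPT-2 style fields, e.g.
#
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4)
#     assert config.num_attention_heads == config.n_head == 1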
| 26 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    """
    Return the biggest number obtainable by removing exactly one digit.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685
    >>> remove_digit(-11)
    1
    >>> remove_digit(2222222)
    222222
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for char in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 1 |
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed=None) -> str:
    """Simulate the BB84 quantum key distribution protocol and return the key."""
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
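    # Added classical illustration of the sifting step above: bits are kept
    # only where the two random basis choices agree, ~50% of positions.
    rng_demo = np.random.default_rng(0)
    demo_a = rng_demo.integers(2, size=1000)
    demo_b = rng_demo.integers(2, size=1000)
    print(f"bases agree at {(demo_a == demo_b).sum()} of 1000 positions")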
| 26 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
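    # Added worked 4-leaf example: with scores [3, 5, 2, 9] (height = log2(4)
    # = 2) the maximiser gets max(min(3, 5), min(2, 9)) = 3.
    assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3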
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Print a path through the maze (if one exists) and report success."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive depth-first helper that marks visited cells in `solutions`."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
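    # Added example run: 0 = free cell, 1 = wall; this 3x3 maze has a path
    # along the second row, so solve_maze prints the 0/1 solution matrix and
    # returns True.
    demo_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solve_maze(demo_maze)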
| 26 |
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
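    # Added quick demonstration: slowsort sorts in place (deliberately
    # super-polynomially, in "multiply and surrender" fashion).
    seq = [5, 2, 9, 1]
    slowsort(seq)
    assert seq == [1, 2, 5, 9]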
| 26 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm.

    Each term of the series contributes roughly 14 digits, hence the
    ceil(precision / 14) iteration count below.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 26 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run the doctests of every file in `directory` matched by `identifier`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
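
# Minimal standalone illustration (not part of the test class above) of the
# doctest machinery these tests drive: doctest executes the examples embedded
# in a docstring and reports any mismatches.
if __name__ == "__main__":
    import doctest as _doctest

    def _add(a: int, b: int) -> int:
        """
        >>> _add(1, 2)
        3
        """
        return a + b

    _doctest.run_docstring_examples(_add, {"_add": _add}, verbose=True)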
| 26 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def _a ( _lowerCamelCase="ro" , _lowerCamelCase="en" , _lowerCamelCase="wmt16" , _lowerCamelCase=None ) -> None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
__snake_case : Dict = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
__snake_case : Tuple = datasets.load_dataset(_lowerCamelCase , _lowerCamelCase )
if save_dir is None:
__snake_case : Optional[Any] = F'''{dataset}-{pair}'''
__snake_case : List[Any] = Path(_lowerCamelCase )
save_dir.mkdir(exist_ok=_lowerCamelCase )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
__snake_case : List[str] = """val""" if split == """validation""" else split
__snake_case : List[Any] = save_dir.joinpath(F'''{fn}.source''' )
__snake_case : Union[str, Any] = save_dir.joinpath(F'''{fn}.target''' )
__snake_case : Union[str, Any] = src_path.open("""w+""" )
__snake_case : Dict = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__snake_case : Optional[Any] = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
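
# Example CLI invocation (fire exposes the keyword arguments above as flags;
# the script filename is assumed):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16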
| 26 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __call__( self : Optional[int] , __magic_name__ : str , __magic_name__ : Dict=1_60_00 , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> int:
"""simple docstring"""
__snake_case : List[Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="""pt""" , sampling_rate=__magic_name__ ).input_features.to(self.device )
__snake_case : List[str] = self.speech_model.generate(__magic_name__ , max_length=48_00_00 )
__snake_case : List[Any] = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Tuple = 1
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Optional[int] = len(__magic_name__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__magic_name__ )}.''' )
# get prompt text embeddings
__snake_case : Dict = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case : Any = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Any = text_embeddings.shape
__snake_case : List[Any] = text_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Optional[Any] = [""""""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !='''
f''' {type(__magic_name__ )}.''' )
elif isinstance(__magic_name__ , __magic_name__ ):
__snake_case : Dict = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case : int = negative_prompt
__snake_case : List[str] = text_input_ids.shape[-1]
__snake_case : Any = self.tokenizer(
__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="""pt""" , )
__snake_case : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Optional[int] = uncond_embeddings.shape[1]
__snake_case : Union[str, Any] = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : List[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[int] = torch.randn(__magic_name__ , generator=__magic_name__ , device="""cpu""" , dtype=__magic_name__ ).to(
self.device )
else:
__snake_case : int = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : List[str] = {}
if accepts_eta:
__snake_case : str = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Dict = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
__snake_case : int = 1 / 0.18215 * latents
__snake_case : Optional[Any] = self.vae.decode(__magic_name__ ).sample
__snake_case : Any = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__snake_case : Tuple = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
| 26 | 1 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 26 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _A ( unittest.TestCase ):
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Tuple = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__snake_case : Union[str, Any] = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__snake_case : Any = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__snake_case : Dict = tf_top_k_top_p_filtering(__magic_name__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__snake_case : List[Any] = output[output != -float("""inf""" )]
__snake_case : List[str] = tf.cast(
tf.where(tf.not_equal(__magic_name__ , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__magic_name__ , __magic_name__ , rtol=1E-12 )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@require_tf
class _A ( unittest.TestCase , __lowercase ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
lowercase__: Optional[Any] = {
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Dict = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Union[str, Any] = 2
__snake_case : str = 2
class _A ( tf.Module ):
def __init__( self : str , __magic_name__ : List[str] ) -> Tuple:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : int = [[2, 0], [1_02, 1_03]]
__snake_case : Dict = [[1, 0], [1, 1]]
__snake_case : Tuple = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : Any = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
for batch_size in range(1 , len(__magic_name__ ) + 1 ):
__snake_case : Dict = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__snake_case : Any = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : str = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : str = 1
__snake_case : Any = 2
class _A ( tf.Module ):
def __init__( self : str , __magic_name__ : List[str] ) -> List[Any]:
"""simple docstring"""
super(__magic_name__ , self ).__init__()
__snake_case : Tuple = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__magic_name__ , )
def lowercase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : int ) -> str:
"""simple docstring"""
__snake_case : Optional[Any] = self.model.generate(
input_ids=__magic_name__ , attention_mask=__magic_name__ , max_new_tokens=__magic_name__ , return_dict_in_generate=__magic_name__ , )
return {"sequences": outputs["sequences"]}
__snake_case : List[str] = [[2], [1_02, 1_03]]
__snake_case : Optional[Any] = [[1], [1, 1]]
__snake_case : int = DummyModel(model=__magic_name__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__magic_name__ , __magic_name__ , signatures={"""serving_default""": dummy_model.serving} )
__snake_case : List[str] = tf.saved_model.load(__magic_name__ ).signatures["""serving_default"""]
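# Here the batch dimension is pinned and the sequence length is dynamic, so each row is fed individually with its own length.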
for input_row in range(len(__magic_name__ ) ):
__snake_case : int = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
__snake_case : Optional[int] = serving_func(**__magic_name__ )["""sequences"""]
__snake_case : List[str] = test_model.generate(**__magic_name__ , max_new_tokens=__magic_name__ )
tf.debugging.assert_equal(__magic_name__ , __magic_name__ )
@slow
@require_tensorflow_text
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__magic_name__ )
class _A ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] ) -> int:
"""simple docstring"""
super().__init__()
__snake_case : Tuple = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__magic_name__ , """spiece.model""" ) , """rb""" ).read() )
__snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowercase__ ( self : Optional[int] , __magic_name__ : Tuple , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[int]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.tokenize(__magic_name__ )
__snake_case , __snake_case : List[Any] = text.pad_model_inputs(
__magic_name__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__snake_case : Optional[Any] = self.model.generate(input_ids=__magic_name__ , attention_mask=__magic_name__ )
return self.tokenizer.detokenize(__magic_name__ )
__snake_case : Tuple = CompleteSentenceTransformer()
__snake_case : Optional[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
__snake_case : Any = complete_model(__magic_name__ )
__snake_case : Any = tf.keras.Model(__magic_name__ , __magic_name__ )
keras_model.save(__magic_name__ )
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : Any = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
__snake_case : Union[str, Any] = 14
__snake_case : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : List[str] = """Hello, my dog is cute and"""
__snake_case : Dict = tokenizer(__magic_name__ , return_tensors="""tf""" )
__snake_case : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__snake_case : Union[str, Any] = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : Any = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__snake_case : Dict = [6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__snake_case : Tuple = model.generate(**__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
__snake_case : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : Union[str, Any] = """Hugging Face is a technology company based in New York and Paris."""
__snake_case : Tuple = bart_tokenizer(__magic_name__ , return_tensors="""tf""" ).input_ids
__snake_case : Tuple = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : Any = bart_model.generate(__magic_name__ ).numpy()
class _A ( __lowercase ):
def lowercase__ ( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : Dict=None , **__magic_name__ : Dict ) -> str:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : Dict = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
__snake_case : Any = bart_model.generate(__magic_name__ , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(__magic_name__ , __magic_name__ ) )
class _A ( bart_model.model.encoder.__class__ ):
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Tuple , **__magic_name__ : Any ) -> int:
"""simple docstring"""
return super().call(__magic_name__ , **__magic_name__ )
__snake_case : int = FakeEncoder(bart_model.config , bart_model.model.shared )
__snake_case : Optional[Any] = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__snake_case : Dict = bart_model.generate(__magic_name__ ).numpy()
with self.assertRaises(__magic_name__ ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__magic_name__ , foo="""bar""" )
| 26 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Union[str, Any] = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("""Quantized models are not supported.""" )
__snake_case : List[Any] = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , _lowerCamelCase )
if matches:
__snake_case : Optional[Any] = float(matches[1] )
__snake_case : Union[str, Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__snake_case : Tuple = 1001
__snake_case : Any = """imagenet-1k-id2label.json"""
__snake_case : Optional[Any] = """huggingface/label-files"""
__snake_case : List[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
__snake_case : Dict = {int(_lowerCamelCase ) + 1: v for k, v in idalabel.items()}
__snake_case : List[str] = """background"""
__snake_case : List[str] = idalabel
__snake_case : List[Any] = {v: k for k, v in idalabel.items()}
return config
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Optional[int] = get_mobilenet_va_config(_lowerCamelCase )
# Load 🤗 model
__snake_case : Optional[Any] = MobileNetVaForImageClassification(_lowerCamelCase ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__snake_case : Optional[int] = MobileNetVaImageProcessor(
crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , )
__snake_case : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" )
__snake_case : Optional[Any] = model(**_lowerCamelCase )
__snake_case : List[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__snake_case : str = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
__snake_case : Tuple = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
__snake_case : List[Any] = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _lowerCamelCase , atol=1E-4 )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
__snake_case : Optional[Any] = """google/""" + model_name
image_processor.push_to_hub(_lowerCamelCase )
model.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__UpperCamelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 26 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A ( __lowercase ):
lowercase__: str = ['''pixel_values''']
def __init__( self : Union[str, Any] , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_55 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : bool = True , **__magic_name__ : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**__magic_name__ )
__snake_case : Optional[Any] = size if size is not None else {"""shortest_edge""": 2_24}
__snake_case : Union[str, Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
__snake_case : Any = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
__snake_case : str = get_size_dict(__magic_name__ , default_to_square=__magic_name__ , param_name="""crop_size""" )
__snake_case : Any = do_resize
__snake_case : Optional[Any] = size
__snake_case : List[str] = resample
__snake_case : Optional[int] = do_center_crop
__snake_case : List[str] = crop_size
__snake_case : Dict = do_rescale
__snake_case : List[Any] = rescale_factor
__snake_case : Tuple = do_normalize
__snake_case : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case : Tuple = do_convert_rgb
def lowercase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Tuple , ) -> np.ndarray:
"""simple docstring"""
__snake_case : Optional[int] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__snake_case : Tuple = get_resize_output_image_size(__magic_name__ , size=size["""shortest_edge"""] , default_to_square=__magic_name__ )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Dict , ) -> np.ndarray:
"""simple docstring"""
__snake_case : str = get_size_dict(__magic_name__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> Tuple:
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : ImageInput , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : int = None , __magic_name__ : bool = None , __magic_name__ : float = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : bool = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> PIL.Image.Image:
"""simple docstring"""
__snake_case : List[str] = do_resize if do_resize is not None else self.do_resize
__snake_case : Any = size if size is not None else self.size
__snake_case : List[str] = get_size_dict(__magic_name__ , param_name="""size""" , default_to_square=__magic_name__ )
__snake_case : Dict = resample if resample is not None else self.resample
__snake_case : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__snake_case : str = get_size_dict(__magic_name__ , param_name="""crop_size""" , default_to_square=__magic_name__ )
__snake_case : str = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : int = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : Tuple = image_mean if image_mean is not None else self.image_mean
__snake_case : List[Any] = image_std if image_std is not None else self.image_std
__snake_case : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case : Optional[int] = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case : List[str] = [convert_to_rgb(__magic_name__ ) for image in images]
# All transformations expect numpy arrays.
__snake_case : str = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
__snake_case : List[str] = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
__snake_case : Any = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
__snake_case : Optional[Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
__snake_case : int = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
__snake_case : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
__snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
| 26 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def lowercase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : List[str]="binary" , __magic_name__ : Tuple=None , __magic_name__ : Dict="warn" , ) -> Any:
"""simple docstring"""
__snake_case : Tuple = recall_score(
__magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ , zero_division=__magic_name__ , )
return {"recall": float(__magic_name__ ) if score.size == 1 else score}
| 26 | 1 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> bool:
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__UpperCamelCase = int(input("Enter number: ").strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 26 |
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def lowercase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
] , )
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any]=None ) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
| 26 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
__UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( __lowercase ):
def __init__( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.register_modules(unet=__magic_name__ , scheduler=__magic_name__ )
@torch.no_grad()
def __call__( self : Any , __magic_name__ : int = 1 , __magic_name__ : int = 1_00 , __magic_name__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __magic_name__ : Optional[float] = None , __magic_name__ : bool = True , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if audio_length_in_s is None:
__snake_case : Optional[Any] = self.unet.config.sample_size / self.unet.config.sample_rate
__snake_case : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
__snake_case : List[str] = 2 ** len(self.unet.up_blocks )
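# The UNet changes resolution once per block, so the generated audio must span at least 3 * 2 ** len(up_blocks) samples to survive all levels.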
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
__snake_case : List[str] = int(__magic_name__ )
if sample_size % down_scale_factor != 0:
__snake_case : Tuple = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
""" process.""" )
__snake_case : List[str] = int(__magic_name__ )
__snake_case : Optional[int] = next(iter(self.unet.parameters() ) ).dtype
__snake_case : Optional[Any] = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__magic_name__ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__snake_case : List[Any] = randn_tensor(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
# set step values
self.scheduler.set_timesteps(__magic_name__ , device=audio.device )
__snake_case : Union[str, Any] = self.scheduler.timesteps.to(__magic_name__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__snake_case : Tuple = self.unet(__magic_name__ , __magic_name__ ).sample
# 2. compute previous image: x_t -> t_t-1
__snake_case : Optional[int] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ ).prev_sample
__snake_case : int = audio.clamp(-1 , 1 ).float().cpu().numpy()
__snake_case : str = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__magic_name__ )
| 26 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__UpperCamelCase = "http://www.mocksite.com/file1.txt"
__UpperCamelCase = "\"text\": [\"foo\", \"foo\"]"
__UpperCamelCase = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _A :
lowercase__: str = 200
lowercase__: List[str] = {'''Content-Length''': '''100'''}
lowercase__: Union[str, Any] = {}
def lowercase__ ( self : Any , **__magic_name__ : List[Any] ) -> Dict:
"""simple docstring"""
return [bytes(__magic_name__ , """utf-8""" )]
def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
import requests
monkeypatch.setattr(_lowerCamelCase , """request""" , _lowerCamelCase )
__snake_case : Union[str, Any] = URL
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : str = url
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [url]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Union[str, Any] = {"""train""": url}
__snake_case : Dict = """dummy"""
__snake_case : List[str] = """downloads"""
__snake_case : List[Any] = tmp_path
__snake_case : List[Any] = DownloadConfig(
cache_dir=os.path.join(_lowerCamelCase , _lowerCamelCase ) , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : int = dl_manager.download(_lowerCamelCase )
__snake_case : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Any = [downloaded_paths]
__snake_case : List[Any] = [urls]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in downloaded_paths.keys()
__snake_case : Tuple = downloaded_paths.values()
__snake_case : Optional[int] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_lowerCamelCase , _lowerCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__snake_case : List[str] = Path(_lowerCamelCase )
__snake_case : Any = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__snake_case : Union[str, Any] = downloaded_path.read_text()
assert content == CONTENT
__snake_case : List[str] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
__snake_case : Union[str, Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Optional[int] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Tuple = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = {"""train""": filename}
__snake_case : Optional[Any] = """dummy"""
__snake_case : List[Any] = xz_file.parent
__snake_case : int = """extracted"""
__snake_case : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
__snake_case : List[str] = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
__snake_case : Optional[Any] = dl_manager.extract(_lowerCamelCase )
__snake_case : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__snake_case : Dict = [extracted_paths]
__snake_case : int = [paths]
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
assert "train" in extracted_paths.keys()
__snake_case : int = extracted_paths.values()
__snake_case : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_lowerCamelCase , _lowerCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__snake_case : Any = Path(_lowerCamelCase )
__snake_case : str = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_lowerCamelCase , etag=_lowerCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__snake_case : Optional[int] = extracted_path.read_text()
__snake_case : str = text_file.read_text()
assert extracted_file_content == expected_file_content
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_lowerCamelCase , start=1 ):
__snake_case : Tuple = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
__snake_case : Any = request.getfixturevalue(_lowerCamelCase )
__snake_case : str = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _a ( _lowerCamelCase , _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : int = request.getfixturevalue(_lowerCamelCase )
__snake_case : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_lowerCamelCase ) , start=1 ):
assert os.path.basename(_lowerCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 26 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A :
@staticmethod
def lowercase__ ( *__magic_name__ : Dict , **__magic_name__ : Tuple ) -> List[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class _A ( unittest.TestCase ):
lowercase__: Any = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowercase__ ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Any ) -> str:
"""simple docstring"""
__snake_case : Tuple = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
__snake_case : Union[str, Any] = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def lowercase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = vqa_pipeline(__magic_name__ , top_k=1 )
self.assertEqual(
__magic_name__ , [
[{"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ )}],
[{"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ )}],
] , )
@require_torch
def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[str] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
__snake_case : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__snake_case : Any = """How many cats are there?"""
__snake_case : List[Any] = vqa_pipeline(image=__magic_name__ , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
__magic_name__ , [{"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ )}, {"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ )}] )
__snake_case : List[str] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
__magic_name__ , [{"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ )}, {"""score""": ANY(__magic_name__ ), """answer""": ANY(__magic_name__ )}] )
@slow
@require_torch
def lowercase__ ( self : int ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
__snake_case : Dict = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__snake_case : Any = """How many cats are there?"""
__snake_case : Tuple = vqa_pipeline(image=__magic_name__ , question=__magic_name__ , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
__snake_case : str = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] )
__snake_case : Tuple = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def lowercase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
pass
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase = 100 ) -> int:
"""simple docstring"""
__snake_case : Any = n * (n + 1) * (2 * n + 1) / 6
__snake_case : List[Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 26 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case : str = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case : Tuple = emb.weight.shape
__snake_case : List[str] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : List[str] = emb.weight.data
return lin_layer
def _a ( _lowerCamelCase , _lowerCamelCase="facebook/mbart-large-en-ro" , _lowerCamelCase=False , _lowerCamelCase=False ) -> List[str]:
"""simple docstring"""
__snake_case : Any = torch.load(_lowerCamelCase , map_location="""cpu""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : List[str] = state_dict["""encoder.embed_tokens.weight"""].shape[0]
__snake_case : Tuple = MBartConfig.from_pretrained(_lowerCamelCase , vocab_size=_lowerCamelCase )
if mbart_aa and finetuned:
__snake_case : Dict = """relu"""
__snake_case : Union[str, Any] = state_dict["""decoder.embed_tokens.weight"""]
__snake_case : Optional[int] = MBartForConditionalGeneration(_lowerCamelCase )
model.model.load_state_dict(_lowerCamelCase )
if finetuned:
__snake_case : Any = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _A :
def __init__( self : str , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float = 0 ) -> None:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = row, column
__snake_case : Dict = [[default_value for c in range(__magic_name__ )] for r in range(__magic_name__ )]
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
__snake_case : Dict = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__snake_case : Optional[int] = 0
for row_vector in self.array:
for obj in row_vector:
__snake_case : Optional[int] = max(__magic_name__ , len(str(__magic_name__ ) ) )
__snake_case : str = f'''%{max_element_length}s'''
# Make string and return
def single_line(__magic_name__ : list[float] ) -> str:
nonlocal string_format_identifier
__snake_case : Union[str, Any] = """["""
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(__magic_name__ ) for row_vector in self.array )
return s
def __repr__( self : Optional[int] ) -> str:
"""simple docstring"""
return str(self )
def lowercase__ ( self : Dict , __magic_name__ : tuple[int, int] ) -> bool:
"""simple docstring"""
if not (isinstance(__magic_name__ , (list, tuple) ) and len(__magic_name__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , __magic_name__ : tuple[int, int] ) -> Any:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , __magic_name__ : tuple[int, int] , __magic_name__ : float ) -> None:
"""simple docstring"""
assert self.validate_indicies(__magic_name__ )
__snake_case : Optional[int] = value
def __add__( self : Any , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ )
assert self.row == another.row and self.column == another.column
# Add
__snake_case : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = self[r, c] + another[r, c]
return result
def __neg__( self : Tuple ) -> Matrix:
"""simple docstring"""
__snake_case : Tuple = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : List[Any] = -self[r, c]
return result
def __sub__( self : Optional[int] , __magic_name__ : Matrix ) -> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : List[Any] , __magic_name__ : int | float | Matrix ) -> Matrix:
"""simple docstring"""
if isinstance(__magic_name__ , (int, float) ): # Scalar multiplication
__snake_case : Optional[int] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : Tuple = self[r, c] * another
return result
elif isinstance(__magic_name__ , __magic_name__ ): # Matrix multiplication
assert self.column == another.row
__snake_case : Dict = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__snake_case : Optional[int] = f'''Unsupported type given for another ({type(__magic_name__ )})'''
raise TypeError(__magic_name__ )
def lowercase__ ( self : str ) -> Matrix:
"""simple docstring"""
__snake_case : Any = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__snake_case : str = self[r, c]
return result
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Matrix , __magic_name__ : Matrix ) -> Any:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ) and isinstance(__magic_name__ , __magic_name__ )
assert self.row == self.column == u.row == v.row # u, v should be column vectors
assert u.column == v.column == 1 # u, v should be column vectors
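# Sherman-Morrison: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), where self plays the role of A^(-1).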
# Calculate
__snake_case : List[str] = v.transpose()
__snake_case : Tuple = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
__snake_case : Any = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
__snake_case : Dict = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Union[str, Any] = 1, 2, -3
__snake_case : str = Matrix(3 , 1 , 0 )
__snake_case , __snake_case , __snake_case : Tuple = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(_lowerCamelCase , _lowerCamelCase )}''' )
def _a ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 26 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
lowercase__: List[Any] = ['''pixel_values''']
def __init__( self : Any , __magic_name__ : bool = True , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : PILImageResampling = PILImageResampling.BILINEAR , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_55 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , **__magic_name__ : Any , ) -> None:
"""simple docstring"""
super().__init__(**__magic_name__ )
__snake_case : Optional[int] = size if size is not None else {"""shortest_edge""": 2_56}
__snake_case : Dict = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
__snake_case : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
__snake_case : Any = get_size_dict(__magic_name__ )
__snake_case : Optional[Any] = do_resize
__snake_case : Dict = size
__snake_case : Any = resample
__snake_case : str = do_center_crop
__snake_case : Optional[int] = crop_size
__snake_case : int = do_rescale
__snake_case : Optional[Any] = rescale_factor
__snake_case : Any = do_normalize
__snake_case : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : List[Any] , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
__snake_case : Optional[Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__snake_case : Union[str, Any] = get_resize_output_image_size(__magic_name__ , size=size["""shortest_edge"""] , default_to_square=__magic_name__ )
return resize(__magic_name__ , size=__magic_name__ , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
__snake_case : int = get_size_dict(__magic_name__ )
return center_crop(__magic_name__ , size=(size["""height"""], size["""width"""]) , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : str , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Any ) -> np.ndarray:
"""simple docstring"""
return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : Dict , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
def lowercase__ ( self : int , __magic_name__ : ImageInput , __magic_name__ : Optional[bool] = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : bool = None , __magic_name__ : Dict[str, int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[float] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[float, List[float]]] = None , __magic_name__ : Optional[Union[str, TensorType]] = None , __magic_name__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__magic_name__ : List[Any] , ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
__snake_case : Optional[int] = size if size is not None else self.size
__snake_case : Any = get_size_dict(__magic_name__ , default_to_square=__magic_name__ )
__snake_case : List[Any] = resample if resample is not None else self.resample
__snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : str = crop_size if crop_size is not None else self.crop_size
__snake_case : str = get_size_dict(__magic_name__ )
__snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : Optional[Any] = image_mean if image_mean is not None else self.image_mean
__snake_case : Tuple = image_std if image_std is not None else self.image_std
__snake_case : int = make_list_of_images(__magic_name__ )
if not valid_images(__magic_name__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__snake_case : str = [to_numpy_array(__magic_name__ ) for image in images]
if do_resize:
__snake_case : int = [self.resize(image=__magic_name__ , size=__magic_name__ , resample=__magic_name__ ) for image in images]
if do_center_crop:
__snake_case : Any = [self.center_crop(image=__magic_name__ , size=__magic_name__ ) for image in images]
if do_rescale:
__snake_case : List[Any] = [self.rescale(image=__magic_name__ , scale=__magic_name__ ) for image in images]
if do_normalize:
__snake_case : Tuple = [self.normalize(image=__magic_name__ , mean=__magic_name__ , std=__magic_name__ ) for image in images]
__snake_case : Dict = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
__snake_case : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
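# --- Illustrative sketch (not part of the original class) ---
# A minimal numpy-only walkthrough of the preprocessing order used above
# (crop -> rescale -> normalize -> channels-first); the crop size, mean and
# std below are hypothetical placeholders, not the class defaults.
def _demo_preprocess_order():
    import numpy as np

    image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
    # center crop to 224x224 (the resize step is skipped for brevity)
    top = (image.shape[0] - 224) // 2
    left = (image.shape[1] - 224) // 2
    image = image[top : top + 224, left : left + 224]
    # rescale to [0, 1], then normalize per channel
    image = image.astype(np.float32) / 255.0
    mean = np.array([0.5, 0.5, 0.5], dtype=np.float32)
    std = np.array([0.5, 0.5, 0.5], dtype=np.float32)
    image = (image - mean) / std
    # channels-first, as ChannelDimension.FIRST would produce
    return image.transpose(2, 0, 1)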
| 26 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _a ( _lowerCamelCase ) -> List[Any]:
"""simple docstring"""
__snake_case : Union[str, Any] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : Dict = emb.weight.shape
__snake_case : Optional[int] = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
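# --- Illustrative sketch (not part of the original script) ---
# The helper above ties the decoder's output projection to the token
# embedding by reusing the embedding matrix as the linear weight. A tiny
# self-contained equivalent with hypothetical sizes (vocab 10, hidden 4):
def _demo_tied_projection():
    emb = nn.Embedding(10, 4)
    lin = nn.Linear(4, 10, bias=False)
    lin.weight.data = emb.weight.data  # share the same underlying storage
    assert lin.weight.data_ptr() == emb.weight.data_ptr()
    return lin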
def _a ( _lowerCamelCase , _lowerCamelCase=None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = {}
for old_key in state_dict.keys():
__snake_case : Union[str, Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case : Tuple = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
__snake_case : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
__snake_case : Dict = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
__snake_case : Union[str, Any] = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
__snake_case : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
__snake_case : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
__snake_case : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
__snake_case : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
__snake_case : str = state_dict[old_key]
return new_dict
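# --- Illustrative sketch (not part of the original script) ---
# The renaming above is plain string substitution. Two hypothetical fairseq
# keys and the hub-style names they end up with:
def _demo_key_rename():
    expert_key = "layers.0.moe_layer.experts.0.fc1.weight"
    expert_key = expert_key.replace("moe_layer.experts.0", "ffn.experts.expert_3")
    # -> "layers.0.ffn.experts.expert_3.fc1.weight" (fc1 kept as-is for experts)
    dense_key = "layers.1.fc1.weight".replace(".fc1.", ".ffn.fc1.")
    # -> "layers.1.ffn.fc1.weight" (dense ffn layers gain the "ffn." prefix)
    return expert_key, dense_key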
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
    # Arguments (not required; defaults point at the author's local checkpoint paths)
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 | 1 |
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
__UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
__UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
__UpperCamelCase = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
__UpperCamelCase = f"""down_blocks.{i}.resnets.{j}."""
__UpperCamelCase = f"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
__UpperCamelCase = f"""down_blocks.{i}.attentions.{j}."""
__UpperCamelCase = f"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
__UpperCamelCase = f"""up_blocks.{i}.resnets.{j}."""
__UpperCamelCase = f"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
__UpperCamelCase = f"""up_blocks.{i}.attentions.{j}."""
__UpperCamelCase = f"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
__UpperCamelCase = f"""down_blocks.{i}.downsamplers.0.conv."""
__UpperCamelCase = f"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
__UpperCamelCase = f"""up_blocks.{i}.upsamplers.0."""
__UpperCamelCase = f"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
__UpperCamelCase = "mid_block.attentions.0."
__UpperCamelCase = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
__UpperCamelCase = f"""mid_block.resnets.{j}."""
__UpperCamelCase = f"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : str = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
__snake_case : Dict = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
__snake_case : Optional[int] = v.replace(_lowerCamelCase , _lowerCamelCase )
__snake_case : Optional[Any] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
__snake_case : Tuple = v.replace(_lowerCamelCase , _lowerCamelCase )
__snake_case : int = v
__snake_case : str = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
__UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
__UpperCamelCase = f"""encoder.down_blocks.{i}.resnets.{j}."""
__UpperCamelCase = f"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
__UpperCamelCase = f"""down_blocks.{i}.downsamplers.0."""
__UpperCamelCase = f"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
__UpperCamelCase = f"""up_blocks.{i}.upsamplers.0."""
__UpperCamelCase = f"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
__UpperCamelCase = f"""decoder.up_blocks.{i}.resnets.{j}."""
__UpperCamelCase = f"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
__UpperCamelCase = f"""mid_block.resnets.{i}."""
__UpperCamelCase = f"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
__UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
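# --- Illustrative sketch (not part of the original script) ---
# diffusers stores the VAE attention projections as nn.Linear weights of
# shape (out, in), while the original SD checkpoint expects 1x1 conv weights
# of shape (out, in, 1, 1); the reshape above adds the two singleton dims.
# A quick check with a hypothetical 512-dim projection:
def _demo_reshape_for_sd():
    w = torch.randn(512, 512)
    w_sd = w.reshape(*w.shape, 1, 1)
    assert w_sd.shape == (512, 512, 1, 1)
    return w_sd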
def _a ( _lowerCamelCase ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
__snake_case : Optional[int] = v.replace(_lowerCamelCase , _lowerCamelCase )
__snake_case : Dict = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
__snake_case : Optional[int] = v.replace(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = v
__snake_case : List[Any] = {v: vae_state_dict[k] for k, v in mapping.items()}
__snake_case : Optional[Any] = ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F'''mid.attn_1.{weight_name}.weight''' in k:
print(F'''Reshaping {k} for SD format''' )
__snake_case : Any = reshape_weight_for_sd(_lowerCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
__UpperCamelCase = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
__UpperCamelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
__UpperCamelCase = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
__UpperCamelCase = {"q": 0, "k": 1, "v": 2}
def _a ( _lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = {}
__snake_case : List[str] = {}
__snake_case : Union[str, Any] = {}
for k, v in text_enc_dict.items():
if (
k.endswith(""".self_attn.q_proj.weight""" )
or k.endswith(""".self_attn.k_proj.weight""" )
or k.endswith(""".self_attn.v_proj.weight""" )
):
__snake_case : Tuple = k[: -len(""".q_proj.weight""" )]
__snake_case : str = k[-len("""q_proj.weight""" )]
if k_pre not in capture_qkv_weight:
__snake_case : Optional[Any] = [None, None, None]
__snake_case : Optional[int] = v
continue
if (
k.endswith(""".self_attn.q_proj.bias""" )
or k.endswith(""".self_attn.k_proj.bias""" )
or k.endswith(""".self_attn.v_proj.bias""" )
):
__snake_case : Tuple = k[: -len(""".q_proj.bias""" )]
__snake_case : Optional[int] = k[-len("""q_proj.bias""" )]
if k_pre not in capture_qkv_bias:
__snake_case : int = [None, None, None]
__snake_case : Union[str, Any] = v
continue
        __snake_case : str = textenc_pattern.sub(lambda _lowerCamelCase : protected[re.escape(_lowerCamelCase.group(0 ) )] , _lowerCamelCase )
__snake_case : Tuple = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        __snake_case : Tuple = textenc_pattern.sub(lambda _lowerCamelCase : protected[re.escape(_lowerCamelCase.group(0 ) )] , _lowerCamelCase )
__snake_case : int = torch.cat(_lowerCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
        __snake_case : Any = textenc_pattern.sub(lambda _lowerCamelCase : protected[re.escape(_lowerCamelCase.group(0 ) )] , _lowerCamelCase )
__snake_case : Optional[int] = torch.cat(_lowerCamelCase )
return new_state_dict
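# --- Illustrative sketch (not part of the original script) ---
# OpenCLIP keeps attention as one fused in_proj matrix, while the HF text
# encoder splits q/k/v; the conversion above therefore concatenates the
# three captured projections along dim 0. With a hypothetical hidden size 8:
def _demo_fuse_qkv():
    d = 8
    q, k, v = (torch.randn(d, d) for _ in range(3))
    in_proj_weight = torch.cat([q, k, v])  # (3*d, d), the OpenCLIP layout
    assert in_proj_weight.shape == (3 * d, d)
    return in_proj_weight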
def _a ( _lowerCamelCase ) -> List[str]:
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
__UpperCamelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
__UpperCamelCase = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
__UpperCamelCase = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
__UpperCamelCase = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if they exist, otherwise from the PyTorch .bin files
if osp.exists(unet_path):
__UpperCamelCase = load_file(unet_path, device="cpu")
else:
__UpperCamelCase = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
__UpperCamelCase = torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
__UpperCamelCase = load_file(vae_path, device="cpu")
else:
__UpperCamelCase = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
__UpperCamelCase = torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
__UpperCamelCase = load_file(text_enc_path, device="cpu")
else:
__UpperCamelCase = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
__UpperCamelCase = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
__UpperCamelCase = convert_unet_state_dict(unet_state_dict)
__UpperCamelCase = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
__UpperCamelCase = convert_vae_state_dict(vae_state_dict)
__UpperCamelCase = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
__UpperCamelCase = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
__UpperCamelCase = {"transformer." + k: v for k, v in text_enc_dict.items()}
__UpperCamelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
__UpperCamelCase = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
__UpperCamelCase = convert_text_enc_state_dict(text_enc_dict)
__UpperCamelCase = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
__UpperCamelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
__UpperCamelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
__UpperCamelCase = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 26 |
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
__snake_case : List[Any] = 0.04
__snake_case : Tuple = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
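# --- Illustrative sketch (not part of the original class) ---
# The response computed above is r = det(M) - k * trace(M)**2, where M is
# the 2x2 structure tensor [[wxx, wxy], [wxy, wyy]] summed over the window.
# A tiny numeric check with hypothetical window sums:
def _demo_harris_response():
    wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
    det = wxx * wyy - wxy**2  # 11.0
    trace = wxx + wyy  # 7.0
    return det - k * trace**2  # 11.0 - 0.04 * 49.0 = 9.04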
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 | 1 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _A ( nn.Module ):
def __init__( self : Optional[Any] , __magic_name__ : int = 16 , __magic_name__ : int = 88 , __magic_name__ : Optional[int] = None , __magic_name__ : int = 1 , __magic_name__ : float = 0.0 , __magic_name__ : int = 32 , __magic_name__ : Optional[int] = None , __magic_name__ : bool = False , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "geglu" , __magic_name__ : Optional[int] = None , ) -> Any:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__magic_name__ , attention_head_dim=__magic_name__ , in_channels=__magic_name__ , num_layers=__magic_name__ , dropout=__magic_name__ , norm_num_groups=__magic_name__ , cross_attention_dim=__magic_name__ , attention_bias=__magic_name__ , sample_size=__magic_name__ , num_vector_embeds=__magic_name__ , activation_fn=__magic_name__ , num_embeds_ada_norm=__magic_name__ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__snake_case : List[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__snake_case : Any = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__snake_case : int = [1, 0]
def lowercase__ ( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : List[str]=None , __magic_name__ : int=None , __magic_name__ : Optional[Any]=None , __magic_name__ : bool = True , ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = hidden_states
__snake_case : int = []
__snake_case : int = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__snake_case : Tuple = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__snake_case : Tuple = self.transformer_index_for_condition[i]
__snake_case : Optional[Any] = self.transformers[transformer_index](
__magic_name__ , encoder_hidden_states=__magic_name__ , timestep=__magic_name__ , cross_attention_kwargs=__magic_name__ , return_dict=__magic_name__ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__snake_case : List[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__snake_case : List[str] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__magic_name__ )
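# --- Illustrative sketch (not part of the original module) ---
# The forward pass above blends the two transformers' residual contributions
# with mix_ratio and then adds back the input. A hypothetical scalar version
# of that interpolation:
def _demo_mix(delta_a: float, delta_b: float, input_state: float, mix_ratio: float = 0.5) -> float:
    return delta_a * mix_ratio + delta_b * (1 - mix_ratio) + input_state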
| 26 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 26 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
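# --- Illustrative sketch (not part of the original script) ---
# The replacement machinery is plain regex substitution. A hypothetical
# in-memory example (assuming the pattern table above is bound to
# REPLACE_PATTERNS, as the functions here expect):
def _demo_version_sub():
    code = '__version__ = "4.26.0.dev0"\n'
    re_pattern, replace = REPLACE_PATTERNS["init"]
    # the dev suffix is replaced with the pinned version string
    return re_pattern.sub(replace.replace("VERSION", "4.26.0"), code)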
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__UpperCamelCase = "bart"
__UpperCamelCase = True
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : int = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
__snake_case : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
__snake_case : List[Any] = qar_model.eval()
else:
__snake_case , __snake_case : Optional[Any] = (None, None)
if MODEL_TYPE == "bart":
__snake_case : List[str] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
__snake_case : Any = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
__snake_case : int = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
__snake_case : int = sas_model.eval()
else:
__snake_case , __snake_case : Dict = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> Tuple:
"""simple docstring"""
if LOAD_DENSE_INDEX:
__snake_case : Tuple = faiss.StandardGpuResources()
__snake_case : Optional[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
__snake_case : str = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case : Optional[int] = faiss.IndexFlatIP(128 )
__snake_case : Any = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase )
wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU
else:
__snake_case , __snake_case : Tuple = (None, None)
__snake_case : List[str] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_lowerCamelCase )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
__snake_case : Dict = elia["""train_eli5"""]
__snake_case : int = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) )
__snake_case : Dict = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_lowerCamelCase )
return (elia_train, eli5_train_q_index)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_indexes()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = load_models()
__UpperCamelCase , __UpperCamelCase = load_train_data()
def _a ( _lowerCamelCase , _lowerCamelCase=10 ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase )
__snake_case , __snake_case : Tuple = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase )
__snake_case : Tuple = [elia_train[int(_lowerCamelCase )] for i in I[0]]
return nn_examples
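# --- Illustrative sketch (not part of the original app) ---
# Retrieval above is max-inner-product search over question embeddings. A
# self-contained toy index with hypothetical 4-dim vectors:
def _demo_faiss_mips():
    xb = np.random.rand(100, 4).astype("float32")  # toy "training" embeddings
    index = faiss.IndexFlatIP(4)
    index.add(xb)
    xq = np.random.rand(1, 4).astype("float32")  # toy query embedding
    scores, ids = index.search(xq, 5)  # top-5 neighbors by dot product
    return scores, ids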
def _a ( _lowerCamelCase , _lowerCamelCase="wiki40b" , _lowerCamelCase="dense" , _lowerCamelCase=10 ) -> Optional[Any]:
"""simple docstring"""
if source == "none":
__snake_case , __snake_case : Dict = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case : Dict = query_qa_dense_index(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
__snake_case , __snake_case : str = query_es_index(
_lowerCamelCase , _lowerCamelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=_lowerCamelCase , )
__snake_case : Optional[int] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
__snake_case : Optional[Any] = """question: {} context: {}""".format(_lowerCamelCase , _lowerCamelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _lowerCamelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None),
} )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=64 , _lowerCamelCase=256 , _lowerCamelCase=False , _lowerCamelCase=2 , _lowerCamelCase=0.95 , _lowerCamelCase=0.8 ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
__snake_case : Union[str, Any] = qa_sas_generate(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__UpperCamelCase = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__UpperCamelCase = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__UpperCamelCase = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__UpperCamelCase = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__UpperCamelCase = st.sidebar.checkbox("Demo options")
if demo_options:
__UpperCamelCase = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__UpperCamelCase = action_list.index(action_st)
__UpperCamelCase = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__UpperCamelCase = show_type == "Show full text of passages"
else:
__UpperCamelCase = 3
__UpperCamelCase = True
__UpperCamelCase = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__UpperCamelCase = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__UpperCamelCase = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__UpperCamelCase = "wiki40b"
__UpperCamelCase = "dense"
__UpperCamelCase = "beam"
__UpperCamelCase = 2
__UpperCamelCase = 64
__UpperCamelCase = 256
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = st.sidebar.checkbox("Generation options")
if generate_options:
__UpperCamelCase = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__UpperCamelCase = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__UpperCamelCase = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__UpperCamelCase = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__UpperCamelCase = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__UpperCamelCase = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__UpperCamelCase = None
# start main text
__UpperCamelCase = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__UpperCamelCase = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__UpperCamelCase = st.text_input("Enter your question here:", "")
else:
__UpperCamelCase = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="dense", n_results=10)
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method="sparse", n_results=10)
__UpperCamelCase = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__UpperCamelCase = support_list[:10]
__UpperCamelCase = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__UpperCamelCase , __UpperCamelCase = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__UpperCamelCase , __UpperCamelCase = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__UpperCamelCase = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__UpperCamelCase = res[1].strip()
if sec_titles == "":
__UpperCamelCase = "[{}]({})".format(res[0], wiki_url)
else:
__UpperCamelCase = sec_titles.split(" & ")
__UpperCamelCase = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__UpperCamelCase = find_nearest_training(question)
__UpperCamelCase = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__UpperCamelCase = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__UpperCamelCase = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 26 | 1 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _A ( __lowercase ):
lowercase__: torch.FloatTensor
lowercase__: Optional[torch.FloatTensor] = None
def _a ( _lowerCamelCase , _lowerCamelCase=0.9_99 , _lowerCamelCase="cosine" , ) -> int:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(_lowerCamelCase ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_lowerCamelCase ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__snake_case : Dict = []
for i in range(_lowerCamelCase ):
__snake_case : List[Any] = i / num_diffusion_timesteps
__snake_case : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_lowerCamelCase ) / alpha_bar_fn(_lowerCamelCase ) , _lowerCamelCase ) )
return torch.tensor(_lowerCamelCase , dtype=torch.floataa )
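# --- Illustrative sketch (not part of the original scheduler) ---
# With the cosine schedule, beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped at max_beta. A hypothetical 4-step schedule:
def _demo_cosine_betas(num_steps: int = 4, max_beta: float = 0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]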
class _A ( __lowercase , __lowercase ):
@register_to_config
def __init__( self : Optional[Any] , __magic_name__ : int = 10_00 , __magic_name__ : str = "fixed_small_log" , __magic_name__ : bool = True , __magic_name__ : Optional[float] = 1.0 , __magic_name__ : str = "epsilon" , __magic_name__ : str = "squaredcos_cap_v2" , ) -> List[Any]:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" )
__snake_case : Optional[int] = betas_for_alpha_bar(__magic_name__ )
__snake_case : List[str] = 1.0 - self.betas
__snake_case : Optional[Any] = torch.cumprod(self.alphas , dim=0 )
__snake_case : Any = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
__snake_case : int = 1.0
# setable values
__snake_case : Union[str, Any] = None
__snake_case : Any = torch.from_numpy(np.arange(0 , __magic_name__ )[::-1].copy() )
__snake_case : Optional[Any] = variance_type
def lowercase__ ( self : Optional[Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : Optional[int] = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def lowercase__ ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Union[str, torch.device] = None ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = num_inference_steps
__snake_case : Optional[int] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
__snake_case : int = (np.arange(0 , __magic_name__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
__snake_case : str = torch.from_numpy(__magic_name__ ).to(__magic_name__ )
def lowercase__ ( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=None , __magic_name__ : int=None , __magic_name__ : List[Any]=None ) -> List[Any]:
"""simple docstring"""
if prev_timestep is None:
__snake_case : Union[str, Any] = t - 1
__snake_case : Optional[int] = self.alphas_cumprod[t]
__snake_case : Optional[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__snake_case : Union[str, Any] = 1 - alpha_prod_t
__snake_case : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__snake_case : Union[str, Any] = self.betas[t]
else:
__snake_case : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__snake_case : Union[str, Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
__snake_case : Optional[int] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
__snake_case : int = torch.log(torch.clamp(__magic_name__ , min=1E-20 ) )
__snake_case : List[Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
__snake_case : Tuple = variance.log()
__snake_case : str = beta.log()
__snake_case : int = (predicted_variance + 1) / 2
__snake_case : Tuple = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self : str , __magic_name__ : torch.FloatTensor , __magic_name__ : int , __magic_name__ : torch.FloatTensor , __magic_name__ : Optional[int] = None , __magic_name__ : Dict=None , __magic_name__ : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
__snake_case : Any = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
__snake_case , __snake_case : Optional[Any] = torch.split(__magic_name__ , sample.shape[1] , dim=1 )
else:
__snake_case : Tuple = None
# 1. compute alphas, betas
if prev_timestep is None:
__snake_case : Tuple = t - 1
__snake_case : Union[str, Any] = self.alphas_cumprod[t]
__snake_case : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__snake_case : Optional[int] = 1 - alpha_prod_t
__snake_case : Tuple = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__snake_case : List[Any] = self.betas[t]
__snake_case : List[Any] = self.alphas[t]
else:
__snake_case : int = 1 - alpha_prod_t / alpha_prod_t_prev
__snake_case : Optional[Any] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__snake_case : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__snake_case : Optional[Any] = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
""" for the UnCLIPScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__snake_case : Tuple = torch.clamp(
__magic_name__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
__snake_case : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__snake_case : List[str] = 0
if t > 0:
__snake_case : Dict = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__magic_name__ , device=model_output.device )
__snake_case : Union[str, Any] = self._get_variance(
__magic_name__ , predicted_variance=__magic_name__ , prev_timestep=__magic_name__ , )
if self.variance_type == "fixed_small_log":
__snake_case : Tuple = variance
elif self.variance_type == "learned_range":
__snake_case : int = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
""" for the UnCLIPScheduler.""" )
__snake_case : List[Any] = variance * variance_noise
__snake_case : int = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__magic_name__ , pred_original_sample=__magic_name__ )
def lowercase__ ( self : List[str] , __magic_name__ : torch.FloatTensor , __magic_name__ : torch.FloatTensor , __magic_name__ : torch.IntTensor , ) -> torch.FloatTensor:
"""simple docstring"""
__snake_case : str = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
__snake_case : Tuple = timesteps.to(original_samples.device )
__snake_case : List[str] = alphas_cumprod[timesteps] ** 0.5
__snake_case : Any = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
__snake_case : Union[str, Any] = sqrt_alpha_prod.unsqueeze(-1 )
__snake_case : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5
__snake_case : str = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
__snake_case : Optional[Any] = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
__snake_case : List[str] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
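# --- Illustrative sketch (not part of the original scheduler) ---
# add_noise implements the closed-form forward process
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. A hypothetical
# scalar check with alpha_bar_t = 0.9:
def _demo_forward_diffusion():
    alpha_bar_t = torch.tensor(0.9)
    x0, eps = torch.tensor(2.0), torch.tensor(0.5)
    return alpha_bar_t**0.5 * x0 + (1 - alpha_bar_t) ** 0.5 * eps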
| 26 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class _A ( __lowercase ):
def __init__( self : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Any ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 26 | 1 |
'''simple docstring'''
from statistics import mean, stdev
def _a ( _lowerCamelCase , _lowerCamelCase = 3 ) -> list:
"""simple docstring"""
__snake_case : Dict = min(_lowerCamelCase )
__snake_case : int = max(_lowerCamelCase )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _lowerCamelCase ) for x in data]
def _a ( _lowerCamelCase , _lowerCamelCase = 3 ) -> list:
"""simple docstring"""
__snake_case : List[str] = mean(_lowerCamelCase )
__snake_case : Any = stdev(_lowerCamelCase )
# standardize data
return [round((x - mu) / (sigma) , _lowerCamelCase ) for x in data]
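# --- Illustrative sketch (not part of the original module) ---
# Expected behavior of the two rescalings above on the toy input [1, 2, 3]:
# min-max normalization -> [0.0, 0.5, 1.0]; z-score -> [-1.0, 0.0, 1.0].
def _demo_rescale(data=(1, 2, 3)):
    x_min, x_max = min(data), max(data)
    normalized = [(x - x_min) / (x_max - x_min) for x in data]
    mu, sigma = mean(data), stdev(data)
    standardized = [(x - mu) / sigma for x in data]
    return normalized, standardized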
| 26 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__snake_case : List[str] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__snake_case : Optional[int] = k.replace(""".attn""" , """.self_attn""" )
__snake_case : Tuple = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : List[str] = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__snake_case : List[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__snake_case : str = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__snake_case : Optional[int] = k.replace("""norm3""" , """final_layer_norm""" )
return k
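# --- Illustrative sketch (not part of the original script) ---
# A hypothetical ParlAI key walked through the substitutions above (assuming
# the table is bound to PATTERNS, as the function expects):
def _demo_parlai_rename():
    k = "encoder.layers.0.attention.q_lin.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    k = k.replace(".attn", ".self_attn")  # encoder-side extra rule
    return k  # -> "encoder.layers.0.self_attn.q_proj.weight"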
def _a ( _lowerCamelCase ) -> Any:
"""simple docstring"""
__snake_case : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__snake_case : Optional[Any] = sd.pop(_lowerCamelCase )
__snake_case : List[str] = k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__snake_case : Union[str, Any] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = torch.load(_lowerCamelCase , map_location="""cpu""" )
__snake_case : Dict = model["""model"""]
__snake_case : Optional[int] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(_lowerCamelCase )
__snake_case : List[Any] = m.model.state_dict().keys()
__snake_case : int = []
__snake_case : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__snake_case : Optional[int] = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__snake_case : str = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__UpperCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__UpperCamelCase = "examples/"
__UpperCamelCase = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__UpperCamelCase = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__UpperCamelCase = "README.md"
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Tuple:
"""simple docstring"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : Union[str, Any] = f.read()
__snake_case , __snake_case : List[Any] = REPLACE_PATTERNS[pattern]
__snake_case : Optional[Any] = replace.replace("""VERSION""" , _lowerCamelCase )
__snake_case : Optional[Any] = re_pattern.sub(_lowerCamelCase , _lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_lowerCamelCase ):
# Remove folders whose examples are no longer actively maintained from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , pattern="""examples""" )
def _a ( _lowerCamelCase , _lowerCamelCase=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not patch:
update_version_in_examples(_lowerCamelCase )
def _a ( ) -> Optional[int]:
"""simple docstring"""
__snake_case : str = """🤗 Transformers currently provides the following architectures"""
__snake_case : List[Any] = """1. Want to contribute a new model?"""
with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : List[str] = f.readlines()
# Find the start of the list.
__snake_case : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : int = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : Optional[Any] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_lowerCamelCase )
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : List[Any] = f.read()
__snake_case : str = REPLACE_PATTERNS["""init"""][0].search(_lowerCamelCase ).groups()[0]
return packaging.version.parse(_lowerCamelCase )
def _a ( _lowerCamelCase=False ) -> int:
"""simple docstring"""
__snake_case : List[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Dict = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Dict = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Any = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase , patch=_lowerCamelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def _a ( ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = get_version()
__snake_case : Tuple = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
__snake_case : int = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_lowerCamelCase ) == 0:
__snake_case : Optional[int] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_lowerCamelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__UpperCamelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 26 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _A ( unittest.TestCase ):
@require_torch
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Any = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
__snake_case : Optional[Any] = load_dataset("""ashraq/esc50""" )
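# use the last clip of the ESC-50 train split as the test audio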
__snake_case : Dict = dataset["""train"""]["""audio"""][-1]["""array"""]
__snake_case : str = audio_classifier(__magic_name__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(__magic_name__ ) , [{"""score""": 0.501, """label""": """Sound of a dog"""}, {"""score""": 0.499, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@slow
@require_torch
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[int] = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This audio clip is of a dog
__snake_case : Tuple = load_dataset("""ashraq/esc50""" )
__snake_case : Union[str, Any] = dataset["""train"""]["""audio"""][-1]["""array"""]
__snake_case : str = audio_classifier(__magic_name__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(__magic_name__ ) , [
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
] , )
__snake_case : Any = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(__magic_name__ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
__snake_case : Optional[int] = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(__magic_name__ ) , [
[
{"""score""": 0.999, """label""": """Sound of a dog"""},
{"""score""": 0.001, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
| 26 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(__magic_name__ )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = self._create_example_records()
__snake_case : str = Dataset.from_list(__magic_name__ )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(__magic_name__ ):
self.assertDictEqual(__magic_name__ , example_records[i] )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = self._create_example_records()
__snake_case : Dict = Dataset.from_list(__magic_name__ )
__snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : str ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
__snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
__snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowercase__ ( self : List[str] ) -> Optional[Any]: # checks if the type can be inferred from the second record
"""simple docstring"""
__snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
__snake_case : int = Dataset.from_list(__magic_name__ )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowercase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = Dataset.from_list([] )
self.assertEqual(len(__magic_name__ ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 26 | 1 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase ) -> list[tuple[int, int]]:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = position
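# the eight L-shaped moves a knight can make from (y, x)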
__snake_case : Any = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__snake_case : Optional[Any] = []
for position in positions:
__snake_case , __snake_case : List[Any] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_lowerCamelCase )
return permissible_positions
def _a ( _lowerCamelCase ) -> bool:
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> bool:
"""simple docstring"""
if is_complete(_lowerCamelCase ):
return True
for position in get_valid_pos(_lowerCamelCase , len(_lowerCamelCase ) ):
__snake_case , __snake_case : Dict = position
if board[y][x] == 0:
__snake_case : int = curr + 1
if open_knight_tour_helper(_lowerCamelCase , _lowerCamelCase , curr + 1 ):
return True
__snake_case : int = 0
return False
def _a ( _lowerCamelCase ) -> list[list[int]]:
"""simple docstring"""
__snake_case : Dict = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
__snake_case : Dict = 1
if open_knight_tour_helper(_lowerCamelCase , (i, j) , 1 ):
return board
__snake_case : Any = 0
__snake_case : str = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class _A ( nn.Module ):
def __init__( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__snake_case : List[Any] = nn.Linear(3 , 4 )
__snake_case : str = nn.BatchNormad(4 )
__snake_case : Optional[Any] = nn.Linear(4 , 5 )
def lowercase__ ( self : str , __magic_name__ : Dict ) -> List[str]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(__magic_name__ ) ) )
class _A ( __lowercase ):
def lowercase__ ( self : List[str] , __magic_name__ : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class _A ( __lowercase ):
def lowercase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return output + 1
class _A ( unittest.TestCase ):
def lowercase__ ( self : Dict ) -> Any:
"""simple docstring"""
__snake_case : int = ModelForTest()
__snake_case : Tuple = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
self.assertEqual(test_model._hf_hook , __magic_name__ )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Optional[int] = ModelHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
add_hook_to_module(__magic_name__ , __magic_name__ , append=__magic_name__ )
self.assertEqual(isinstance(test_model._hf_hook , __magic_name__ ) , __magic_name__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__magic_name__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(__magic_name__ )
self.assertFalse(hasattr(__magic_name__ , """_hf_hook""" ) )
self.assertFalse(hasattr(__magic_name__ , """_old_forward""" ) )
def lowercase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[Any] = ModelForTest()
__snake_case : Any = torch.randn(2 , 3 )
__snake_case : str = test_model(x + 1 )
__snake_case : int = test_model(x + 2 )
__snake_case : Union[str, Any] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces the existing hook; hooks do not chain
__snake_case : Optional[int] = PreForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : Optional[int] = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[str] = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-5 )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : str = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Any = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model that already has one replaces the existing hook; hooks do not chain
__snake_case : Any = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : Dict = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
__snake_case : str = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : int = test_model(__magic_name__ )
assert torch.allclose(__magic_name__ , output + 2 , atol=1E-5 )
def lowercase__ ( self : str ) -> int:
"""simple docstring"""
__snake_case : Union[str, Any] = ModelForTest()
__snake_case : int = torch.randn(2 , 3 )
__snake_case : Any = test_model(__magic_name__ )
__snake_case : Dict = PostForwardHook()
add_hook_to_module(__magic_name__ , __magic_name__ )
__snake_case : List[Any] = test_model(__magic_name__ )
self.assertTrue(torch.allclose(__magic_name__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
__snake_case : Dict = True
__snake_case : int = test_model(__magic_name__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def lowercase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Union[str, Any] = model(__magic_name__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(__magic_name__ , AlignDevicesHook(io_same_device=__magic_name__ ) )
__snake_case : Tuple = torch.randn(2 , 3 ).to(0 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , torch.device(0 ) )
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
__snake_case : List[str] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Any = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
__snake_case : int = {
"""execution_device""": 0 if torch.cuda.is_available() else """cpu""",
"""offload""": True,
"""offload_buffers""": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__magic_name__ ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__magic_name__ ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : str = torch.randn(2 , 3 )
__snake_case : str = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
__snake_case : Tuple = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
__snake_case : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : Union[str, Any] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Optional[int] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , offload_buffers=__magic_name__ )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : Dict = torch.randn(2 , 3 )
__snake_case : Optional[int] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule onto a different device
__snake_case : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
__snake_case : List[str] = torch.device(__magic_name__ )
self.assertEqual(model.batchnorm.running_mean.device , __magic_name__ )
__snake_case : Tuple = torch.randn(2 , 3 )
__snake_case : Optional[Any] = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__magic_name__ , execution_device=__magic_name__ , offload=__magic_name__ , weights_map=model.state_dict() , offload_buffers=__magic_name__ , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
__snake_case : List[str] = torch.randn(2 , 3 )
__snake_case : Dict = model(__magic_name__ )
self.assertEqual(output.device , __magic_name__ )
# Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(__magic_name__ )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 26 | 1 |
'''simple docstring'''
import numpy
class _A :
def __init__( self : Any , __magic_name__ : numpy.ndarray , __magic_name__ : numpy.ndarray ) -> None:
"""simple docstring"""
__snake_case : Optional[int] = input_array
# Random initial weights are assigned, where the first argument is the
# number of nodes in the previous layer and the second argument is the
# number of nodes in the next layer.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
__snake_case : str = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__snake_case : Dict = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__snake_case : Any = numpy.random.rand(3 , 1 )
# Real output values provided.
__snake_case : Optional[int] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__snake_case : Tuple = numpy.zeros(output_array.shape )
def lowercase__ ( self : Optional[int] ) -> numpy.ndarray:
"""simple docstring"""
__snake_case : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__snake_case : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__snake_case : List[str] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowercase__ ( self : Tuple ) -> None:
"""simple docstring"""
__snake_case : Optional[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
__snake_case : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
__snake_case : Tuple = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowercase__ ( self : Any , __magic_name__ : numpy.ndarray , __magic_name__ : int , __magic_name__ : bool ) -> None:
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
__snake_case : Dict = self.feedforward()
self.back_propagation()
if give_loss:
__snake_case : int = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : numpy.ndarray ) -> int:
"""simple docstring"""
__snake_case : Dict = input_arr
__snake_case : List[str] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
__snake_case : List[str] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
__snake_case : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _a ( _lowerCamelCase ) -> numpy.ndarray:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def _a ( _lowerCamelCase ) -> numpy.ndarray:
"""simple docstring"""
return (value) * (1 - (value))
def _a ( ) -> int:
"""simple docstring"""
__snake_case : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
__snake_case : Optional[Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
__snake_case : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowerCamelCase , output_array=_lowerCamelCase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowerCamelCase , iterations=10 , give_loss=_lowerCamelCase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 26 |
'''simple docstring'''
from __future__ import annotations
__UpperCamelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ) -> tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
__snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the reference grid
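# the start cell is marked as visited in the closed grid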
__snake_case : Tuple = 1
__snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_lowerCamelCase ) )
] # the action grid
__snake_case : List[str] = init[0]
__snake_case : str = init[1]
__snake_case : int = 0
__snake_case : int = g + heuristic[x][y] # estimated total cost: cost so far plus the heuristic to the goal
__snake_case : List[str] = [[f, g, x, y]]
__snake_case : Any = False # flag that is set when search is complete
__snake_case : int = False # flag set if we can't find expand
while not found and not resign:
if len(_lowerCamelCase ) == 0:
raise ValueError("""Algorithm is unable to find solution""" )
else: # choose the least costly action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : Tuple = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : List[Any] = next_cell[3]
__snake_case : int = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Optional[Any] = True
else:
for i in range(len(_lowerCamelCase ) ): # to try out different valid actions
__snake_case : Union[str, Any] = x + DIRECTIONS[i][0]
__snake_case : str = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_lowerCamelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : str = g + cost
__snake_case : Tuple = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : List[str] = 1
__snake_case : Optional[int] = i
__snake_case : List[str] = []
__snake_case : Optional[int] = goal[0]
__snake_case : List[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Dict = x - DIRECTIONS[action[x][y]][0]
__snake_case : int = y - DIRECTIONS[action[x][y]][1]
__snake_case : Optional[int] = xa
__snake_case : int = ya
invpath.append([x, y] )
__snake_case : Optional[int] = []
for i in range(len(_lowerCamelCase ) ):
path.append(invpath[len(_lowerCamelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__UpperCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__UpperCamelCase = [0, 0]
# all coordinates are given in format [y,x]
__UpperCamelCase = [len(grid) - 1, len(grid[0]) - 1]
__UpperCamelCase = 1
# the cost map which pushes the path closer to the goal
__UpperCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__UpperCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__UpperCamelCase = 99
__UpperCamelCase , __UpperCamelCase = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 26 | 1 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__UpperCamelCase = Mapping[str, np.ndarray]
__UpperCamelCase = Mapping[str, Any] # Is a nested dict.
__UpperCamelCase = 0.01
@dataclasses.dataclass(frozen=__lowercase )
class _A :
lowercase__: np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowercase__: np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowercase__: np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowercase__: np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowercase__: np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowercase__: Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowercase__: Optional[str] = None
# Templates used to generate this protein (prediction-only)
lowercase__: Optional[Sequence[str]] = None
# Chain corresponding to each parent
lowercase__: Optional[Sequence[int]] = None
def _a ( _lowerCamelCase ) -> Protein:
"""simple docstring"""
__snake_case : List[str] = R"""(\[[A-Z]+\]\n)"""
__snake_case : List[str] = [tag.strip() for tag in re.split(_lowerCamelCase , _lowerCamelCase ) if len(_lowerCamelCase ) > 0]
__snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
__snake_case : List[str] = ["N", "CA", "C"]
__snake_case : Union[str, Any] = None
__snake_case : Optional[Any] = None
__snake_case : str = None
for g in groups:
if "[PRIMARY]" == g[0]:
__snake_case : Any = g[1][0].strip()
for i in range(len(_lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
__snake_case : Union[str, Any] = """X""" # FIXME: strings are immutable
__snake_case : Optional[Any] = np.array(
[residue_constants.restype_order.get(_lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
__snake_case : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(_lowerCamelCase , g[1][axis].split() ) ) )
__snake_case : Optional[Any] = np.array(_lowerCamelCase )
__snake_case : Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
__snake_case : Union[str, Any] = np.transpose(tertiary_np[:, i::3] )
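# ProteinNet stores coordinates in picometers; scale them to angstroms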
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
__snake_case : Union[str, Any] = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
__snake_case : List[str] = np.zeros(
(
len(_lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(_lowerCamelCase ):
__snake_case : Optional[Any] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=_lowerCamelCase , atom_mask=_lowerCamelCase , aatype=_lowerCamelCase , residue_index=np.arange(len(_lowerCamelCase ) ) , b_factors=_lowerCamelCase , )
def _a ( _lowerCamelCase , _lowerCamelCase = 0 ) -> List[str]:
"""simple docstring"""
__snake_case : List[str] = []
__snake_case : Tuple = prot.remark
if remark is not None:
pdb_headers.append(F'''REMARK {remark}''' )
__snake_case : Optional[int] = prot.parents
__snake_case : Optional[int] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
__snake_case : Any = [p for i, p in zip(_lowerCamelCase , _lowerCamelCase ) if i == chain_id]
if parents is None or len(_lowerCamelCase ) == 0:
__snake_case : str = ["""N/A"""]
pdb_headers.append(F'''PARENT {" ".join(_lowerCamelCase )}''' )
return pdb_headers
def _a ( _lowerCamelCase , _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : List[str] = []
__snake_case : int = pdb_str.split("""\n""" )
__snake_case : Union[str, Any] = prot.remark
if remark is not None:
out_pdb_lines.append(F'''REMARK {remark}''' )
__snake_case : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
__snake_case : Union[str, Any] = []
if prot.parents_chain_index is not None:
__snake_case : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(_lowerCamelCase ) , [] )
parent_dict[str(_lowerCamelCase )].append(_lowerCamelCase )
__snake_case : Optional[int] = max([int(_lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
__snake_case : Tuple = parent_dict.get(str(_lowerCamelCase ) , ["""N/A"""] )
parents_per_chain.append(_lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
__snake_case : Dict = [["""N/A"""]]
def make_parent_line(_lowerCamelCase ) -> str:
return F'''PARENT {" ".join(_lowerCamelCase )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
__snake_case : Any = 0
for i, l in enumerate(_lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(_lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(_lowerCamelCase ):
__snake_case : int = parents_per_chain[chain_counter]
else:
__snake_case : int = ["""N/A"""]
out_pdb_lines.append(make_parent_line(_lowerCamelCase ) )
return "\n".join(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> str:
"""simple docstring"""
__snake_case : Optional[int] = residue_constants.restypes + ["""X"""]
def res_atoa(_lowerCamelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
__snake_case : Union[str, Any] = residue_constants.atom_types
__snake_case : List[str] = []
__snake_case : int = prot.atom_mask
__snake_case : Optional[int] = prot.aatype
__snake_case : List[str] = prot.atom_positions
__snake_case : str = prot.residue_index.astype(np.intaa )
__snake_case : Optional[int] = prot.b_factors
__snake_case : str = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
__snake_case : Optional[int] = get_pdb_headers(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
pdb_lines.extend(_lowerCamelCase )
__snake_case : Dict = aatype.shape[0]
__snake_case : Optional[int] = 1
__snake_case : List[str] = 0
__snake_case : Optional[Any] = string.ascii_uppercase
__snake_case : Tuple = None
# Add all atom sites.
for i in range(_lowerCamelCase ):
__snake_case : Optional[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(_lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
__snake_case : int = """ATOM"""
__snake_case : int = atom_name if len(_lowerCamelCase ) == 4 else F''' {atom_name}'''
__snake_case : List[str] = """"""
__snake_case : Optional[Any] = """"""
__snake_case : Any = 1.00
__snake_case : Optional[Any] = atom_name[0] # Protein supports only C, N, O, S, this works.
__snake_case : List[Any] = """"""
__snake_case : Tuple = """A"""
if chain_index is not None:
__snake_case : Optional[int] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
__snake_case : Optional[int] = (
F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
F'''{res_name_a:>3} {chain_tag:>1}'''
F'''{residue_index[i]:>4}{insertion_code:>1} '''
F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
F'''{occupancy:>6.2f}{b_factor:>6.2f} '''
F'''{element:>2}{charge:>2}'''
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
__snake_case : List[str] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
__snake_case : Optional[Any] = True
__snake_case : Union[str, Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
__snake_case : Optional[Any] = """TER"""
__snake_case : List[str] = (
F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(_lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(_lowerCamelCase , _lowerCamelCase ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(_lowerCamelCase )
def _a ( _lowerCamelCase ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=_lowerCamelCase , remark=_lowerCamelCase , parents=_lowerCamelCase , parents_chain_index=_lowerCamelCase , )
| 26 |
'''simple docstring'''
def _a ( _lowerCamelCase ) -> int:
"""simple docstring"""
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("""only integers accepted as input""" )
else:
__snake_case : List[Any] = str(abs(_lowerCamelCase ) )
__snake_case : Union[str, Any] = [list(_lowerCamelCase ) for char in range(len(_lowerCamelCase ) )]
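# drop one digit from each copy; e.g. 120 -> 20, 10, 12, of which 20 is the largest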
for index in range(len(_lowerCamelCase ) ):
num_transpositions[index].pop(_lowerCamelCase )
return max(
int("""""".join(list(_lowerCamelCase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 26 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( __lowercase , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
@property
def lowercase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = ort.SessionOptions()
__snake_case : Optional[Any] = False
return options
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : str = """A red cat sitting on a park bench"""
__snake_case : List[str] = np.random.RandomState(0 )
__snake_case : Optional[int] = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , guidance_scale=7.5 , num_inference_steps=10 , generator=__magic_name__ , output_type="""np""" , )
__snake_case : Tuple = output.images
__snake_case : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__snake_case : str = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowercase__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__snake_case : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case : List[str] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case : List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : Union[str, Any] = """A red cat sitting on a park bench"""
__snake_case : Union[str, Any] = np.random.RandomState(0 )
__snake_case : Optional[Any] = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , guidance_scale=7.5 , num_inference_steps=20 , generator=__magic_name__ , output_type="""np""" , )
__snake_case : Optional[int] = output.images
__snake_case : Dict = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__snake_case : Dict = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 26 |
'''simple docstring'''
from __future__ import annotations
import math
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if not scores:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
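# the maximizing and minimizing players alternate at each level of the game tree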
return (
max(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
)
def _a ( ) -> None:
"""simple docstring"""
__snake_case : Union[str, Any] = [90, 23, 6, 33, 21, 65, 123, 3_4423]
__snake_case : Optional[int] = math.log(len(_lowerCamelCase ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 26 | 1 |
'''simple docstring'''
import cva
import numpy as np
class _A :
def __init__( self : Any , __magic_name__ : float , __magic_name__ : int ) -> Optional[int]:
"""simple docstring"""
if k in (0.04, 0.06):
__snake_case : List[str] = k
__snake_case : int = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return str(self.k )
def lowercase__ ( self : Dict , __magic_name__ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
__snake_case : Dict = cva.imread(__magic_name__ , 0 )
__snake_case , __snake_case : List[str] = img.shape
__snake_case : list[list[int]] = []
__snake_case : str = img.copy()
__snake_case : Tuple = cva.cvtColor(__magic_name__ , cva.COLOR_GRAY2RGB )
__snake_case , __snake_case : List[Any] = np.gradient(__magic_name__ )
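# per-pixel gradient products; their windowed sums form the Harris structure tensor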
__snake_case : Optional[Any] = dx**2
__snake_case : Tuple = dy**2
__snake_case : List[Any] = dx * dy
__snake_case : List[Any] = 0.04
__snake_case : Tuple = self.window_size // 2
for y in range(__magic_name__ , h - offset ):
for x in range(__magic_name__ , w - offset ):
__snake_case : Dict = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : Optional[int] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : str = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__snake_case : List[str] = (wxx * wyy) - (wxy**2)
__snake_case : Dict = wxx + wyy
__snake_case : List[str] = det - k * (trace**2)
# corner response threshold; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 26 |
'''simple docstring'''
from __future__ import annotations
def _a ( _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ) -> None:
"""simple docstring"""
if start is None:
__snake_case : Optional[Any] = 0
if end is None:
__snake_case : Optional[Any] = len(_lowerCamelCase ) - 1
if start >= end:
return
__snake_case : Tuple = (start + end) // 2
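# sort both halves, move the overall maximum to the end, then sort the remaining prefix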
slowsort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
slowsort(_lowerCamelCase , mid + 1 , _lowerCamelCase )
if sequence[end] < sequence[mid]:
__snake_case , __snake_case : str = sequence[mid], sequence[end]
slowsort(_lowerCamelCase , _lowerCamelCase , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 26 | 1 |