Dataset schema (one row per sample):

- code: string, length 86 to 54.5k
- code_codestyle: int64, 0 to 371
- style_context: string, length 87 to 49.2k
- style_context_codestyle: int64, 0 to 349
- label: int64, 0 to 1
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"  # CSS class of the price <div>
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
334
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of `source_data` into per-attribute columns."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each column; weight 0 rewards low raw values, weight 1 high ones."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Append a weighted min-max proximity score to every row of `source_data`."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
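For orientation, here is a small worked run of the scoring routine above; the vehicle data and the weights are invented purely for illustration:

# Illustrative data: three cars scored on (price, comfort); weight 0 favours
# low raw values (cheaper is better), weight 1 favours high ones.
vehicles = [[20.0, 60.0], [23.0, 90.0], [22.0, 50.0]]
scored = procentual_proximity(vehicles, [0, 1])
for row in scored:
    print(row)  # each row now carries its combined score (here between 0 and 2)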
334
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : List[str] = """speech_to_text""" UpperCamelCase_ : List[str] = ["""past_key_values"""] UpperCamelCase_ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int=1_00_00 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : List[str]=20_48 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : str=6 , SCREAMING_SNAKE_CASE_ : Tuple=20_48 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str="relu" , SCREAMING_SNAKE_CASE_ : Dict=2_56 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : str=1 , SCREAMING_SNAKE_CASE_ : List[Any]=0 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : List[str]=60_00 , SCREAMING_SNAKE_CASE_ : Optional[int]=10_24 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=(5, 5) , SCREAMING_SNAKE_CASE_ : Tuple=10_24 , SCREAMING_SNAKE_CASE_ : Dict=80 , SCREAMING_SNAKE_CASE_ : str=1 , **SCREAMING_SNAKE_CASE_ : str , ) -> Union[str, Any]: '''simple docstring''' A: Tuple = vocab_size A: Union[str, Any] = d_model A: int = encoder_ffn_dim A: Any = encoder_layers A: str = encoder_attention_heads A: Dict = decoder_ffn_dim A: Tuple = decoder_layers A: Dict = decoder_attention_heads A: Any = dropout A: Dict = attention_dropout A: List[str] = activation_dropout A: List[Any] = activation_function A: Union[str, Any] = init_std A: Optional[Any] = encoder_layerdrop A: int = decoder_layerdrop A: int = use_cache A: List[str] = encoder_layers A: Tuple = scale_embedding # scale factor will be sqrt(d_model) if True A: Dict = max_source_positions A: Tuple = max_target_positions A: List[str] = num_conv_layers A: int = list(SCREAMING_SNAKE_CASE_ ) A: Dict = conv_channels A: Tuple = input_feat_per_channel A: Tuple = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ''' f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """ f"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
334
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES UpperCamelCase_ : Union[str, Any] = 
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = DPRContextEncoderTokenizer class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Dict = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Optional[int] = DPRQuestionEncoderTokenizer UpperCamelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) UpperCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) UpperCamelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ : '''simple docstring''' def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) elif titles is None or texts is None: A: Union[str, Any] = titles if texts is None else texts return super().__call__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Union[str, Any] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles] A: Optional[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts] A: str = len(SCREAMING_SNAKE_CASE_ ) A: List[Any] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages assert len(SCREAMING_SNAKE_CASE_ ) == len( SCREAMING_SNAKE_CASE_ ), f"""There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts.""" A: Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: Dict = super().__call__(SCREAMING_SNAKE_CASE_ , 
add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] } if return_attention_mask is not False: A: Union[str, Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) A: Optional[Any] = attention_mask return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : BatchEncoding , SCREAMING_SNAKE_CASE_ : DPRReaderOutput , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : int = 64 , SCREAMING_SNAKE_CASE_ : int = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Any = reader_input['''input_ids'''] A , A , A: str = reader_output[:3] A: str = len(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ ) A: List[DPRReaderOutput] = [] for doc_id in sorted_docs: A: List[str] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence A: Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A: Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: A: int = len(SCREAMING_SNAKE_CASE_ ) A: Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(SCREAMING_SNAKE_CASE_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Union[str, Any] = [] for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) A: Any = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ ) A: Dict = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A: int = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals 
): continue chosen_span_intervals.append((start_index, end_index) ) if len(SCREAMING_SNAKE_CASE_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Dict = READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : Optional[Any] = DPRReaderTokenizer
334
1
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=1_0 ) -> Tuple: A: List[Any] = [] for _ in range(__lowercase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=1_0 ) -> Any: A: int = [] for step in range(__lowercase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A: Any = os.path.join(__lowercase , '''schedule.bin''' ) torch.save(scheduler.state_dict() , __lowercase ) A: Union[str, Any] = torch.load(__lowercase ) scheduler.load_state_dict(__lowercase ) return lrs @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A: Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE_ ) A: str = torch.tensor([0.4, 0.2, -0.5] ) A: List[str] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A: int = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_00 ): A: int = criterion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _snake_case ( self : int ) -> Dict: '''simple docstring''' A: List[Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE_ ) A: List[str] = torch.tensor([0.4, 0.2, -0.5] ) A: int = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A: List[Any] = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=SCREAMING_SNAKE_CASE_ , weight_decay=0.0 , relative_step=SCREAMING_SNAKE_CASE_ , scale_parameter=SCREAMING_SNAKE_CASE_ , warmup_init=SCREAMING_SNAKE_CASE_ , ) for _ in range(10_00 ): A: Dict = criterion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = nn.Linear(50 , 50 ) if is_torch_available() else None UpperCamelCase_ : List[str] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None UpperCamelCase_ : List[str] = 10 def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None ) -> Dict: '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ , msg=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A: Optional[int] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A , A: Optional[Any] = data A: Any = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A: int = unwrap_schedule(SCREAMING_SNAKE_CASE_ , self.num_steps ) self.assertListAlmostEqual( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tol=1E-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , ) A: Union[str, Any] = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(SCREAMING_SNAKE_CASE_ ) # wrap to test picklability of the schedule A: Any = unwrap_and_save_reload_schedule(SCREAMING_SNAKE_CASE_ , self.num_steps ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , msg=f"""failed for {scheduler_func} in save and reload""" ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = fn def __call__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]: '''simple docstring''' return self.fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @classmethod def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict: '''simple docstring''' A: int = list(map(self , scheduler.lr_lambdas ) )
334
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
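The _LazyModule registered above defers the sentencepiece-dependent import until first attribute access; a rough usage sketch, assuming transformers and sentencepiece are installed:

# Importing the package module is cheap: only _import_structure is read.
import transformers.models.gpt_sw3 as gpt_sw3

# The first attribute access triggers the real
# `from .tokenization_gpt_sw3 import GPTSw3Tokenizer` behind the scenes.
tokenizer_cls = gpt_sw3.GPTSw3Tokenizer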
334
1
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, keep only letters, and separate doubled letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
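A round trip is an easy sanity check for the digraph rules above; note that decode() returns the prepared plaintext (upper-cased, X-padded), not the raw input:

key = "monarchy"
message = "hide the gold"
ciphertext = encode(message, key)
# encode/decode apply inverse shifts, so this holds for any key and message
assert decode(ciphertext, key) == prepare_input(message)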
334
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if the chain of next_node links starting here contains a cycle."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
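The visited-list above costs O(n) extra memory and O(n^2) time from the `node in visited` scans; Floyd's tortoise-and-hare finds the same loops in O(1) space. A sketch against the Node class above (the helper name is mine, not part of the original module):

def has_loop_floyd(head: Node | None) -> bool:
    # Advance one pointer by one node and another by two; they can meet
    # again only if the chain of next_node links closes into a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False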
334
1
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = DebertaTokenizer UpperCamelCase_ : List[str] = True UpperCamelCase_ : int = DebertaTokenizerFast def _snake_case ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A: Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] A: int = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] A: Union[str, Any] = {'''unk_token''': '''[UNK]'''} A: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]: '''simple docstring''' A: Optional[int] = '''lower newer''' A: str = '''lower newer''' return input_text, output_text def _snake_case ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A: str = self.get_tokenizer() A: Any = '''lower newer''' A: Dict = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] A: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokens + [tokenizer.unk_token] A: int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> Any: '''simple docstring''' A: str = self.get_tokenizer() A: List[str] = tokenizer('''Hello''' , '''World''' ) A: Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Tuple ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) A: Any = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.encode( '''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , 
add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) A: int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: int = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: A: List[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) A: Dict = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] A: Dict = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) A: Any = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) for seq in encoding['''input_ids''']] # fmt: off A: Any = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on A: Optional[int] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , SCREAMING_SNAKE_CASE_ ) for expected, decoded in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
334
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
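As a quick check of the composition rotate_90 = reverse_row(transpose(m)): a counterclockwise quarter turn of [[1, 2], [3, 4]] should give [[2, 4], [1, 3]], and a half turn should fully reverse it:

assert rotate_90(make_matrix(2)) == [[2, 4], [1, 3]]
assert rotate_180(make_matrix(2)) == [[4, 3], [2, 1]]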
334
1
import re


def dna(dna: str) -> str:
    """Return the complementary strand of `dna`; raise on invalid bases."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
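A couple of plain assertions illustrating the expected behaviour of dna() above:

assert dna("GCTA") == "CGAT"  # every base maps to its complement
assert dna("ATGC") == "TACG"
try:
    dna("GFTA")  # F is not a valid base
except ValueError as err:
    print(err)  # Invalid Strand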
334
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Element-wise max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
334
1
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Any: A: Dict = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, oder?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] A: Tuple = { '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''], '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''], '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''], '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''], } A: str = F"""{src_lang}-{tgt_lang}""" A: Optional[Any] = F""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = \"{texts[src_lang]}\" input_ids = tokenizer.encode(input, return_tensors=\"pt\") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR's WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) """ os.makedirs(__lowercase , exist_ok=__lowercase ) A: Dict = os.path.join(__lowercase , '''README.md''' ) print(F"""Generating {path}""" ) with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(__lowercase ) # make sure we are under the root of the project UpperCamelCase = Path(__file__).resolve().parent.parent.parent UpperCamelCase = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: UpperCamelCase , UpperCamelCase , UpperCamelCase = model_name.split('''-''') UpperCamelCase = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
334
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
334
1
'''simple docstring''' import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any: A: Optional[Any] = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append( (F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''), ('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''), ('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''), ('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''), ('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''), ('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''), ] ) return rename_keys def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) A: Tuple = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" ) A: Optional[int] = in_proj_weight[ : encoder_config.hidden_size, : ] A: Dict = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] A: Any = in_proj_weight[ -encoder_config.hidden_size :, : ] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: A: Optional[Any] = dct.pop(__lowercase ) A: List[Any] = val def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: if "handwritten" in checkpoint_url: A: str = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or 
"stage1" in checkpoint_url: A: Union[str, Any] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg''' A: Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: A: Any = ViTConfig(image_size=3_8_4 , qkv_bias=__lowercase ) A: List[Any] = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: A: Dict = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder A: Any = 1_0_2_4 A: List[Any] = 4_0_9_6 A: str = 2_4 A: int = 1_6 A: Tuple = 1_0_2_4 else: raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: A: Any = False A: Tuple = '''relu''' A: Any = 1_0_2_4 A: Union[str, Any] = True A: List[str] = False A: Tuple = False # load HuggingFace model A: List[Any] = ViTModel(__lowercase , add_pooling_layer=__lowercase ) A: Tuple = TrOCRForCausalLM(__lowercase ) A: Optional[int] = VisionEncoderDecoderModel(encoder=__lowercase , decoder=__lowercase ) model.eval() # load state_dict of original model, rename some keys A: Tuple = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' , check_hash=__lowercase )['''model'''] A: Union[str, Any] = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): A: Tuple = state_dict.pop(__lowercase ) if key.startswith('''decoder''' ) and "output_projection" not in key: A: List[str] = val else: A: Tuple = val # load state dict model.load_state_dict(__lowercase ) # Check outputs on an image A: int = ViTImageProcessor(size=encoder_config.image_size ) A: Tuple = RobertaTokenizer.from_pretrained('''roberta-large''' ) A: List[str] = TrOCRProcessor(__lowercase , __lowercase ) A: Any = processor(images=prepare_img(__lowercase ) , return_tensors='''pt''' ).pixel_values # verify logits A: str = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) A: Tuple = model(pixel_values=__lowercase , decoder_input_ids=__lowercase ) A: Optional[int] = outputs.logits A: List[str] = torch.Size([1, 1, 5_0_2_6_5] ) if "trocr-base-handwritten" in checkpoint_url: A: List[Any] = torch.tensor( [-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] ) elif "trocr-large-handwritten" in checkpoint_url: A: int = torch.tensor( [-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] ) elif "trocr-base-printed" in checkpoint_url: A: int = torch.tensor( [-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] ) elif "trocr-large-printed" in checkpoint_url: A: str = torch.tensor( [-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" 
assert torch.allclose(logits[0, 0, :1_0] , __lowercase , atol=1E-3 ), "First elements of logits not as expected" Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) print(F"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
334
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
334
1
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu UpperCamelCase = [ '''EAGER''', '''AOT_EAGER''', '''INDUCTOR''', '''NVFUSER''', '''AOT_NVFUSER''', '''AOT_CUDAGRAPHS''', '''OFI''', '''FX2TRT''', '''ONNXRT''', '''IPEX''', ] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=None , __lowercase=None , __lowercase=None ) -> Dict: A: Optional[int] = True while ask_again: A: Any = input(__lowercase ) try: if default is not None and len(__lowercase ) == 0: return default return convert_value(__lowercase ) if convert_value is not None else result except Exception: if error_message is not None: print(__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=[] , __lowercase=None , __lowercase=0 ) -> Optional[int]: A: Tuple = BulletMenu(__lowercase , __lowercase ) A: Union[str, Any] = menu.run(default_choice=__lowercase ) return convert_value(__lowercase ) if convert_value is not None else result def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: Optional[int] = int(__lowercase ) return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: Tuple = int(__lowercase ) return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> int: A: Dict = int(__lowercase ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def SCREAMING_SNAKE_CASE( __lowercase ) -> Any: A: Optional[Any] = int(__lowercase ) return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: A: Tuple = int(__lowercase ) return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> str: return {"yes": True, "no": False}[value.lower()] class lowerCAmelCase_ ( argparse.RawDescriptionHelpFormatter ): '''simple docstring''' def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Tuple: '''simple docstring''' A: Optional[int] = super()._format_usage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = usage.replace('''<command> [<args>] ''' , '''''' ) return usage
334
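# Standalone sketch of the prompt-and-convert pattern used above: keep asking
# until the converter accepts the input, returning a default on empty input.
# The names here are illustrative helpers, not the accelerate API itself.
def ask_until_valid(prompt, convert=None, default=None, error_message=None):
    while True:
        raw = input(prompt)
        try:
            if default is not None and len(raw) == 0:
                return default
            return convert(raw) if convert is not None else raw
        except Exception:
            if error_message is not None:
                print(error_message)

# Example (interactive):
# precision = ask_until_valid(
#     "Mixed precision? [0] no [1] fp16 [2] bf16: ",
#     convert=lambda v: ["no", "fp16", "bf16"][int(v)],
#     default="no",
#     error_message="Please enter 0, 1 or 2.",
# )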
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' @register_to_config def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple: '''simple docstring''' super().__init__() A: Optional[Any] = sample_size # time if time_embedding_type == "fourier": A: Tuple = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) A: List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": A: str = Timesteps( block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) A: Any = block_out_channels[0] if use_timestep_embedding: A: Optional[Any] = block_out_channels[0] * 4 A: List[Any] = TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , ) A: Optional[Any] = nn.ModuleList([] ) A: str = None A: str = nn.ModuleList([] ) A: Tuple = None # down A: Any = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = output_channel A: List[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[int] = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid A: Union[str, Any] = get_mid_block( SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , ) # up A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) ) A: List[str] = reversed_block_out_channels[0] if out_block_type is None: A: int = out_channels else: A: Union[str, Any] = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: List[Any] = output_channel A: int = ( 
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[Any] = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) A: Any = output_channel # out A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) A: Optional[int] = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]: '''simple docstring''' A: Any = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: A: List[str] = timesteps[None].to(sample.device ) A: int = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: A: str = timestep_embed[..., None] A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down A: List[str] = () for downsample_block in self.down_blocks: A , A: Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): A: List[Any] = down_block_res_samples[-1:] A: List[str] = down_block_res_samples[:-1] A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
334
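# Standalone sketch of the timestep-conditioning step in the 1D UNet forward
# pass above: a per-batch time embedding gains a length axis, is repeated to
# the sample length, and can then be concatenated channel-wise with the
# (batch, channels, length) sample. Shapes are illustrative.
import torch

batch, channels, length, embed_dim = 2, 2, 16, 4
sample = torch.randn(batch, channels, length)
timestep_embed = torch.randn(batch, embed_dim)  # one embedding per batch element

timestep_embed = timestep_embed[..., None]                       # (batch, embed_dim, 1)
timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]])  # (batch, embed_dim, length)
conditioned = torch.cat([sample, timestep_embed], dim=1)
print(conditioned.shape)  # torch.Size([2, 6, 16])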
1
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


UpperCamelCase = '''\
@misc{chen2021evaluating,
    title={Evaluating Large Language Models Trained on Code},
    author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
    year={2021},
    eprint={2107.03374},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
'''

UpperCamelCase = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''


UpperCamelCase = '''
Estimates pass@k for a set of code predictions: the probability that at least one of
k generated candidates per problem passes its unit tests.
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4)
    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0)
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric("code_eval")
    >>> test_cases = ["assert add(2,3)==5"]
    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {\'pass@1\': 0.5, \'pass@2\': 1.0}
'''


UpperCamelCase = '''
################################################################################
                                  !!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:

>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"

################################################################################\
'''

UpperCamelCase = '''The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    '''simple docstring'''

    def _snake_case ( self : Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
                    '''references''': datasets.Value('''string''' ),
                } ) ,
            homepage='''https://github.com/openai/human-eval''' ,
            codebase_urls=['''https://github.com/openai/human-eval'''] ,
            reference_urls=['''https://github.com/openai/human-eval'''] ,
            license=_LICENSE , )

    def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict=[1, 10, 1_00] , SCREAMING_SNAKE_CASE_ : List[str]=4 , SCREAMING_SNAKE_CASE_ : Any=3.0 ) -> List[Any]:
        '''simple docstring'''
        if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
            raise ValueError(_WARNING )

        if os.name == "nt":
            raise NotImplementedError('''This metric is currently not supported on Windows.''' )

        with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE_ ) as executor:
            A: Optional[Any] = []
            A: Tuple = Counter()
            A: Any = 0
            A: List[Any] = defaultdict(SCREAMING_SNAKE_CASE_ )

            for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
                for candidate in candidates:
                    A: int = candidate + '''\n''' + test_case
                    A: Any = (test_program, timeout, task_id, completion_id[task_id])
                    A: Any = executor.submit(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
                    futures.append(SCREAMING_SNAKE_CASE_ )
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(SCREAMING_SNAKE_CASE_ ):
                A: str = future.result()
                results[result["task_id"]].append((result['''completion_id'''], result) )

        A , A: Any = [], []
        for result in results.values():
            result.sort()
            A: Union[str, Any] = [r[1]['''passed'''] for r in result]
            total.append(len(SCREAMING_SNAKE_CASE_ ) )
            correct.append(sum(SCREAMING_SNAKE_CASE_ ) )
        A: str = np.array(SCREAMING_SNAKE_CASE_ )
        A: Optional[Any] = np.array(SCREAMING_SNAKE_CASE_ )

        A: Optional[Any] = k
        A: List[str] = {f"""pass@{k}""": estimate_pass_at_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
    def estimator(__lowercase , __lowercase , __lowercase ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )

    if isinstance(__lowercase , __lowercase ):
        A: int = itertools.repeat(__lowercase , len(__lowercase ) )
    else:
        assert len(__lowercase ) == len(__lowercase )
        A: int = iter(__lowercase )

    return np.array([estimator(int(__lowercase ) , int(__lowercase ) , __lowercase ) for n, c in zip(__lowercase , __lowercase )] )
334
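# Numerical check of the unbiased pass@k estimator computed above:
# pass@k = 1 - C(n - c, k) / C(n, k), evaluated in the stable product form.
import numpy as np
from math import comb

def pass_at_k(n, c, k):
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

n, c, k = 20, 3, 5  # 20 samples drawn, 3 passed the tests, estimate pass@5
assert abs(pass_at_k(n, c, k) - (1 - comb(n - c, k) / comb(n, k))) < 1e-12
print(pass_at_k(n, c, k))  # ~0.60, the chance 5 random samples include a pass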
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


UpperCamelCase = logging.get_logger(__name__)


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    def __init__( self : int , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Dict ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , )
        super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
334
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Dict = """upernet""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : List[str]=5_12 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=[1, 2, 3, 6] , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : int=0.4 , SCREAMING_SNAKE_CASE_ : str=3_84 , SCREAMING_SNAKE_CASE_ : Dict=2_56 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1 , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2_55 , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Optional[Any]: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) A: Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: int = backbone_config.get('''model_type''' ) A: List[str] = CONFIG_MAPPING[backbone_model_type] A: Optional[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = backbone_config A: Any = hidden_size A: Any = initializer_range A: Union[str, Any] = pool_scales A: Optional[Any] = use_auxiliary_head A: List[str] = auxiliary_loss_weight A: Optional[int] = auxiliary_in_channels A: Union[str, Any] = auxiliary_channels A: Tuple = auxiliary_num_convs A: int = auxiliary_concat_input A: Union[str, Any] = loss_ignore_index def _snake_case ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' A: List[str] = copy.deepcopy(self.__dict__ ) A: Union[str, Any] = self.backbone_config.to_dict() A: Optional[Any] = self.__class__.model_type return output
334
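# Self-contained sketch of the composed-config pattern above: a head config
# owns a backbone config and serializes it recursively in to_dict(). Class
# names here are illustrative, not the transformers API.
import copy

class BackboneConfig:
    model_type = "resnet"

    def __init__(self, out_features):
        self.out_features = out_features

    def to_dict(self):
        return {"model_type": self.model_type, "out_features": self.out_features}

class SegmentationConfig:
    model_type = "upernet"

    def __init__(self, backbone_config=None, hidden_size=512):
        # default backbone mirrors the "stage1..stage4" fallback above
        self.backbone_config = backbone_config or BackboneConfig(
            ["stage1", "stage2", "stage3", "stage4"]
        )
        self.hidden_size = hidden_size

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.model_type
        return output

print(SegmentationConfig().to_dict())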
'''simple docstring''' from collections import deque class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: '''simple docstring''' A: Union[str, Any] = process_name # process name A: List[str] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A: Dict = arrival_time A: Optional[Any] = burst_time # remaining burst time A: Any = 0 # total time of the process wait in ready queue A: Any = 0 # time from arrival time to completion time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None: '''simple docstring''' A: Dict = number_of_queues # time slice of queues that round robin algorithm applied A: int = time_slices # unfinished process is in this ready_queue A: Tuple = queue # current time A: int = current_time # finished process is in this sequence queue A: deque[Process] = deque() def _snake_case ( self : List[Any] ) -> list[str]: '''simple docstring''' A: str = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Optional[int] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Any = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: List[Any] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]: '''simple docstring''' return [q.burst_time for q in queue] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE_ ) != 0: A: Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A: Any = 0 # set the process's turnaround time because it is finished A: int = self.current_time - cp.arrival_time # set the completion time A: List[str] = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : 
int ) -> tuple[deque[Process], deque[Process]]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE_ ) ): A: Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A: Optional[Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A: int = 0 # set the finish time A: Union[str, Any] = self.current_time # update the process' turnaround time because it is finished A: Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _snake_case ( self : Optional[Any] ) -> deque[Process]: '''simple docstring''' for i in range(self.number_of_queues - 1 ): A , A: Optional[Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0) UpperCamelCase = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( f'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( f'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
334
1
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = ["""pixel_values"""] def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_55 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **SCREAMING_SNAKE_CASE_ : Dict , ) -> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) A: Tuple = size if size is not None else {'''shortest_edge''': 2_24} A: Any = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) A: str = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} A: Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) A: List[Any] = do_resize A: Union[str, Any] = size A: List[Any] = resample A: Any = do_center_crop A: List[str] = crop_size A: List[Any] = do_rescale A: Optional[Any] = rescale_factor A: Optional[int] = do_normalize A: Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN A: Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> np.ndarray: '''simple docstring''' A: Any = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: A: Tuple = int((2_56 / 2_24) * size['''shortest_edge'''] ) A: List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. 
Got {size_dict.keys()}""" ) return resize( SCREAMING_SNAKE_CASE_ , size=(size_dict['''height'''], size_dict['''width''']) , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> np.ndarray: '''simple docstring''' A: Dict = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, float] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> np.ndarray: '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> np.ndarray: '''simple docstring''' return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, Iterable[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[TensorType] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : int , ) -> BatchFeature: '''simple docstring''' A: Union[str, Any] = do_resize if do_resize is not None else self.do_resize A: List[str] = resample if resample is not None else self.resample A: Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop A: Optional[int] = do_rescale if do_rescale is not None else self.do_rescale A: Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor A: Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize A: Union[str, Any] = image_mean if image_mean is not None else self.image_mean A: Optional[int] = image_std if image_std is not None else self.image_std A: Dict = size if size is not None else self.size A: Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) A: Any = crop_size if crop_size is not None else self.crop_size A: Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) A: Optional[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not 
valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. A: Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: A: str = [self.resize(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: A: Optional[int] = [self.center_crop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: A: Dict = [self.rescale(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: A: Optional[Any] = [self.normalize(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] A: List[str] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] A: int = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
334
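# Sketch of the shortest-edge resize rule in the image processor above: scale
# so the shorter image side reaches the target (with the 256/224 crop-margin
# factor from the snippet) while preserving aspect ratio. This is a simplified
# reading of the rule, not the exact library helper.
def shortest_edge_output_size(height, width, shortest_edge, margin=256 / 224):
    target = int(margin * shortest_edge)
    short, long = (height, width) if height <= width else (width, height)
    new_long = int(long * target / short)
    return (target, new_long) if height <= width else (new_long, target)

print(shortest_edge_output_size(480, 640, 224))  # (256, 341)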
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger() @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> int: '''simple docstring''' A: List[str] = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(SCREAMING_SNAKE_CASE_ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(SCREAMING_SNAKE_CASE_ ) [x.remove() for x in self.handles] return self @property def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : nn.Module UpperCamelCase_ : int = 0 UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]: '''simple docstring''' A: Dict = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized A: Tuple = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) ) A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise Exception( f"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while""" f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}.""" ) for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase = True ) -> Any: print(F"""Converting {name}...""" ) with torch.no_grad(): A: Union[str, Any] = timm.create_model(__lowercase , pretrained=__lowercase ).eval() A: List[str] = ResNetForImageClassification(__lowercase ).eval() A: int = ModuleTransfer(src=__lowercase , dest=__lowercase ) A: List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(__lowercase ) assert torch.allclose(from_model(__lowercase ) , our_model(__lowercase ).logits ), "The model logits don't match the original one." 
A: str = F"""resnet{'-'.join(name.split('resnet' ) )}""" print(__lowercase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__lowercase , ) # we can use the convnext one A: Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , ) print(F"""Pushed {checkpoint_name}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> List[Any]: A: Union[str, Any] = '''imagenet-1k-id2label.json''' A: Union[str, Any] = 1_0_0_0 A: Optional[int] = (1, num_labels) A: Dict = '''huggingface/label-files''' A: Any = num_labels A: Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Optional[int] = {int(__lowercase ): v for k, v in idalabel.items()} A: Optional[int] = idalabel A: List[str] = {v: k for k, v in idalabel.items()} A: str = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase ) A: Optional[Any] = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(__lowercase , names_to_config[model_name] , __lowercase , __lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__lowercase , __lowercase , __lowercase , __lowercase ) return config, expected_shape if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
334
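# Minimal sketch of the module-to-module weight transfer idea above: two
# architecturally identical networks can share weights via the state dict,
# after which their outputs should match exactly.
import torch
import torch.nn as nn

src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
dst.load_state_dict(src.state_dict())

src.eval()
dst.eval()
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    assert torch.allclose(src(x), dst(x)), "transferred weights should give identical outputs"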
1
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> str: A: Optional[int] = 0 # if input_string is "aba" than new_input_string become "a|b|a" A: int = '''''' A: Tuple = '''''' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(__lowercase ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring A , A: List[str] = 0, 0 # length[i] shows the length of palindromic substring with center i A: int = [1 for i in range(len(__lowercase ) )] # for each character in new_string find corresponding palindromic string A: str = 0 for j in range(len(__lowercase ) ): A: Optional[Any] = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(__lowercase ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 A: Optional[int] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: A: int = j - k + 1 # noqa: E741 A: Any = j + k - 1 # update max_length and start position if max_length < length[j]: A: str = length[j] A: int = j # create that string A: Optional[int] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
334
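# A quadratic brute-force reference for the longest palindromic substring,
# useful for sanity-checking a Manacher-style implementation like the one above.
def longest_palindrome_brute(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best

assert longest_palindrome_brute("abacab") == "bacab"
assert longest_palindrome_brute("forgeeksskeegfor") == "geeksskeeg"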
'''simple docstring'''
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str | Literal[False]:
    A: List[str] = list(__lowercase )
    A: Optional[Any] = list(__lowercase )
    A: int = 0
    for i in range(len(__lowercase ) ):
        if lista[i] != lista[i]:
            count += 1
            A: Optional[Any] = '''_'''
    if count > 1:
        return False
    else:
        return "".join(__lowercase )


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[str]:
    A: Any = []
    while True:
        A: Dict = ['''$'''] * len(__lowercase )
        A: Union[str, Any] = []
        for i in range(len(__lowercase ) ):
            for j in range(i + 1 , len(__lowercase ) ):
                A: Any = compare_string(binary[i] , binary[j] )
                if k is False:
                    A: Any = '''*'''
                    A: List[Any] = '''*'''
                    temp.append('''X''' )
        for i in range(len(__lowercase ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(__lowercase ) == 0:
            return pi
        A: List[Any] = list(set(__lowercase ) )


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]:
    A: Optional[int] = []
    for minterm in minterms:
        A: Optional[int] = ''''''
        for _ in range(__lowercase ):
            A: List[Any] = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(__lowercase )
    return temp


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> bool:
    A: Union[str, Any] = list(__lowercase )
    A: Union[str, Any] = list(__lowercase )
    A: Optional[int] = 0
    for i in range(len(__lowercase ) ):
        if lista[i] != lista[i]:
            count_n += 1
    return count_n == count


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]:
    A: List[Any] = []
    A: Dict = [0] * len(__lowercase )
    for i in range(len(chart[0] ) ):
        A: List[str] = 0
        A: str = -1
        for j in range(len(__lowercase ) ):
            if chart[j][i] == 1:
                count += 1
                A: Any = j
        if count == 1:
            A: Any = 1
    for i in range(len(__lowercase ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(__lowercase ) ):
                        A: Optional[int] = 0
            temp.append(prime_implicants[i] )
    while True:
        A: Dict = 0
        A: Optional[int] = -1
        A: Dict = 0
        for i in range(len(__lowercase ) ):
            A: str = chart[i].count(1 )
            if count_n > max_n:
                A: Tuple = count_n
                A: Optional[Any] = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(__lowercase ) ):
                    A: Any = 0


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[int]]:
    A: str = [[0 for x in range(len(__lowercase ) )] for x in range(len(__lowercase ) )]
    for i in range(len(__lowercase ) ):
        A: Tuple = prime_implicants[i].count('''_''' )
        for j in range(len(__lowercase ) ):
            if is_for_table(prime_implicants[i] , binary[j] , __lowercase ):
                A: Optional[Any] = 1
    return chart


def SCREAMING_SNAKE_CASE( ) -> None:
    A: int = int(input('''Enter the no. of variables\n''' ) )
    A: Optional[int] = [
        int(__lowercase )
        for x in input(
            '''Enter the decimal representation of Minterms \'Spaces Separated\'\n'''
        ).split()
    ]
    A: List[str] = decimal_to_binary(__lowercase , __lowercase )
    A: str = check(__lowercase )
    print('''Prime Implicants are:''' )
    print(__lowercase )
    A: List[Any] = prime_implicant_chart(__lowercase , __lowercase )
    A: Any = selection(__lowercase , __lowercase )
    print('''Essential Prime Implicants are:''' )
    print(__lowercase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
334
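# The core merging step of Quine-McCluskey in isolation: two implicants
# combine only when they differ in exactly one bit, which then becomes a
# don't-care '_'.
def merge_if_adjacent(a, b):
    diffs = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diffs) != 1:
        return None
    i = diffs[0]
    return a[:i] + "_" + a[i + 1 :]

assert merge_if_adjacent("0110", "0111") == "011_"
assert merge_if_adjacent("0110", "1001") is None  # differs in more than one bit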
1
'''simple docstring''' import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=7 ) -> List[Any]: A: List[Any] = None if token is not None: A: int = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} # The id of a workflow (not of a workflow run) A: List[str] = '''636036''' A: str = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" A: List[str] = requests.get(__lowercase , headers=__lowercase ).json() return result["workflow_runs"] def SCREAMING_SNAKE_CASE( __lowercase ) -> str: A: List[str] = get_daily_ci_runs(__lowercase ) A: Dict = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": A: Dict = workflow_run['''id'''] break return workflow_run_id def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]: A: int = get_last_daily_ci_runs(__lowercase ) if workflow_run_id is not None: A: Dict = get_artifacts_links(worflow_run_id=__lowercase , token=__lowercase ) for artifact_name in artifact_names: if artifact_name in artifacts_links: A: List[str] = artifacts_links[artifact_name] download_artifact( artifact_name=__lowercase , artifact_url=__lowercase , output_dir=__lowercase , token=__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> List[str]: get_last_daily_ci_artifacts(__lowercase , __lowercase , __lowercase ) A: List[str] = {} for artifact_name in artifact_names: A: Any = os.path.join(__lowercase , F"""{artifact_name}.zip""" ) if os.path.isfile(__lowercase ): A: Tuple = {} with zipfile.ZipFile(__lowercase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowercase ): # read the file with z.open(__lowercase ) as f: A: str = f.read().decode('''UTF-8''' ) return results
334
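# Standalone sketch of the artifact-reading step above: walk a zip archive and
# decode each regular file, mirroring the namelist()/open() loop. The archive
# is built in memory so the example is self-contained.
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as z:
    z.writestr("job_result.txt", "all tests passed")

buf.seek(0)
with zipfile.ZipFile(buf) as z:
    for filename in z.namelist():
        with z.open(filename) as f:
            print(filename, "->", f.read().decode("UTF-8"))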
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
    A: Tuple = len(__lowercase )
    for i in range(length - 1 ):
        A: Dict = i
        for k in range(i + 1 , __lowercase ):
            if collection[k] < collection[least]:
                A: List[str] = k
        if least != i:
            A , A: Tuple = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCamelCase = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
334
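# Compact reference version of the selection sort above: pick the minimum of
# the unsorted suffix and swap it into place.
def selection_sort_ref(items):
    items = list(items)
    for i in range(len(items) - 1):
        least = min(range(i, len(items)), key=items.__getitem__)
        items[i], items[least] = items[least], items[i]
    return items

assert selection_sort_ref([64, 25, 12, 22, 11]) == [11, 12, 22, 25, 64]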
1
'''simple docstring''' import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: A: Optional[Any] = RobertaPreLayerNormConfig.from_pretrained( __lowercase , architectures=['''RobertaPreLayerNormForMaskedLM'''] ) # convert state_dict A: Dict = torch.load(hf_hub_download(repo_id=__lowercase , filename='''pytorch_model.bin''' ) ) A: Optional[int] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('''roberta.''' ): A: Optional[Any] = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ): continue A: Dict = tensor_value A: int = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=__lowercase , config=__lowercase , state_dict=__lowercase ) model.save_pretrained(__lowercase ) # convert tokenizer A: List[str] = AutoTokenizer.from_pretrained(__lowercase ) tokenizer.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) UpperCamelCase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
334
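# The checkpoint-conversion pattern above reduced to its essentials: rewrite
# key prefixes and drop unused tensors while copying a state dict. The demo
# keys below are illustrative.
def convert_state_dict(original, old_prefix, new_prefix, drop_suffixes=()):
    converted = {}
    for key, value in original.items():
        if any(key.endswith(suffix) for suffix in drop_suffixes):
            continue  # skip weights the target architecture does not use
        if key.startswith(old_prefix):
            key = new_prefix + key[len(old_prefix):]
        converted[key] = value
    return converted

demo = {
    "roberta.embeddings.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,
    "lm_head.bias": 3,
}
print(convert_state_dict(demo, "roberta.", "roberta_prelayernorm.",
                         drop_suffixes=(".self.LayerNorm.weight",)))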
'''simple docstring''' class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: Tuple = None A: Dict = None A: Optional[int] = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: str = len(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = None def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> str: '''simple docstring''' if sources is int: A: Union[str, Any] = [sources] if sinks is int: A: Tuple = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return A: List[str] = sources[0] A: Optional[int] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: A: Any = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A: Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A: Optional[Any] = max_input_flow A: Optional[Any] = 0 A: str = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A: Optional[Any] = max_input_flow A: str = size - 1 def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: '''simple docstring''' A: Optional[Any] = algorithm(self ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]: '''simple docstring''' A: str = flow_network A: List[str] = flow_network.verticesCount A: Dict = flow_network.sourceIndex A: Any = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A: str = flow_network.graph A: str = False def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' if not self.executed: self._algorithm() A: str = True def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result A: Any = -1 def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )] A: Any = [0] * self.verticies_count A: Optional[Any] = [0] * self.verticies_count def _snake_case ( self : str ) -> Optional[Any]: '''simple docstring''' A: Any = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] 
+= bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A: str = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A: Dict = 0 while i < len(SCREAMING_SNAKE_CASE_ ): A: Any = vertices_list[i] A: str = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) A: Tuple = 0 else: i += 1 A: Tuple = sum(self.preflow[self.source_index] ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int: '''simple docstring''' A: Optional[int] = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> int: '''simple docstring''' A: Optional[Any] = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A: List[Any] = self.heights[to_index] if min_height is not None: A: int = min_height + 1 if __name__ == "__main__": UpperCamelCase = [0] UpperCamelCase = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network UpperCamelCase = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate UpperCamelCase = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
334
1
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

UpperCamelCase = TypeVar('''KEY''')
UpperCamelCase = TypeVar('''VAL''')


@dataclass(frozen=UpperCAmelCase_ , slots=UpperCAmelCase_ )
class lowerCAmelCase_ ( Generic[KEY, VAL] ):
    '''simple docstring'''
    UpperCamelCase_ : KEY
    UpperCamelCase_ : VAL


class lowerCAmelCase_ ( _Item ):
    '''simple docstring'''

    def __init__( self : Union[str, Any] ) -> None:
        '''simple docstring'''
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def __bool__( self : str ) -> bool:
        '''simple docstring'''
        return False


UpperCamelCase = _DeletedItem()


class lowerCAmelCase_ ( MutableMapping[KEY, VAL] ):
    '''simple docstring'''

    def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : float = 0.75 ) -> None:
        '''simple docstring'''
        A: Union[str, Any] = initial_block_size
        A: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        A: int = capacity_factor
        A: List[Any] = 0

    def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : KEY ) -> int:
        '''simple docstring'''
        return hash(SCREAMING_SNAKE_CASE_ ) % len(self._buckets )

    def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> int:
        '''simple docstring'''
        return (ind + 1) % len(self._buckets )

    def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : KEY , SCREAMING_SNAKE_CASE_ : VAL ) -> bool:
        '''simple docstring'''
        A: int = self._buckets[ind]
        if not stored:
            A: Union[str, Any] = _Item(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            self._len += 1
            return True
        elif stored.key == key:
            A: Any = _Item(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            return True
        else:
            return False

    def _snake_case ( self : Union[str, Any] ) -> bool:
        '''simple docstring'''
        A: Optional[int] = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Optional[Any] ) -> bool:
        '''simple docstring'''
        if len(self._buckets ) <= self._initial_block_size:
            return False
        A: List[Any] = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> None:
        '''simple docstring'''
        A: Optional[int] = self._buckets
        A: Union[str, Any] = [None] * new_size
        A: Any = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _snake_case ( self : Tuple ) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets ) * 2 )

    def _snake_case ( self : Tuple ) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets ) // 2 )

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : KEY ) -> Iterator[int]:
        '''simple docstring'''
        A: Tuple = self._get_bucket_index(SCREAMING_SNAKE_CASE_ )
        for _ in range(len(self._buckets ) ):
            yield ind
            A: int = self._get_next_ind(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : KEY , SCREAMING_SNAKE_CASE_ : VAL ) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE_ ):
            if self._try_set(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                break

    def __setitem__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : KEY , SCREAMING_SNAKE_CASE_ : VAL ) -> None:
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def __delitem__( self : Any , SCREAMING_SNAKE_CASE_ : KEY ) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE_ ):
            A: Optional[int] = self._buckets[ind]
            if item is None:
                raise KeyError(SCREAMING_SNAKE_CASE_ )
            if item is _deleted:
                continue
            if item.key == key:
                A: str = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE_ : KEY ) -> VAL:
        '''simple docstring'''
        for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE_ ):
            A: Union[str, Any] = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(SCREAMING_SNAKE_CASE_ )

    def __len__( self : List[Any] ) -> int:
        '''simple docstring'''
        return self._len

    def __iter__( self : Optional[Any] ) -> Iterator[KEY]:
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)

    def __repr__( self : Optional[Any] ) -> str:
        '''simple docstring'''
        A: Any = ''' ,'''.join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item )
        return f"""HashMap({val_string})"""
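# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original snippet): the `(ind + 1) % len(...)`
# step above is plain linear probing. `probe_sequence` is an illustrative
# name; it reproduces the order in which buckets are visited for a capacity-8
# table starting at index 6.
def probe_sequence(start, capacity):
    ind = start
    for _ in range(capacity):
        yield ind
        ind = (ind + 1) % capacity


assert list(probe_sequence(6, 8)) == [6, 7, 0, 1, 2, 3, 4, 5]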
334
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

UpperCamelCase = logging.get_logger(__name__)


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''
    UpperCamelCase_ : Optional[Any] = ["""input_features""", """attention_mask"""]

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_60_00 , SCREAMING_SNAKE_CASE_ : int=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        A: Union[str, Any] = num_mel_bins
        A: str = do_ceptral_normalize
        A: int = normalize_means
        A: List[Any] = normalize_vars
        A: Any = True

    def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , ) -> np.ndarray:
        '''simple docstring'''
        A: Optional[int] = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        A: Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
        A: List[Any] = ta_kaldi.fbank(SCREAMING_SNAKE_CASE_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
        return features.numpy()

    @staticmethod
    def _snake_case ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : float = 0.0 , ) -> np.ndarray:
        '''simple docstring'''
        if normalize_means:
            A: str = x[:input_length].mean(axis=0 )
            A: Dict = np.subtract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        if normalize_vars:
            A: Tuple = x[:input_length].std(axis=0 )
            A: List[Any] = np.divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        if input_length < x.shape[0]:
            A: Optional[int] = padding_value
        # make sure array is in float32
        A: Optional[Any] = x.astype(np.floataa )
        return x

    def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[np.ndarray] , SCREAMING_SNAKE_CASE_ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
        '''simple docstring'''
        A: int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.normalize_means , self.normalize_vars , self.padding_value )
            for x, n in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        ]

    def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.'''
            )
        A: Any = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        A: Optional[Any] = is_batched_numpy or (
            isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            A: Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
            A: int = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa )
        elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            A: Any = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            A: Union[str, Any] = [raw_speech]
        # extract fbank features
        A: str = [self._extract_fbank_features(SCREAMING_SNAKE_CASE_ ) for waveform in raw_speech]
        # convert into correct format for padding
        A: int = BatchFeature({'''input_features''': features} )
        A: int = self.pad(
            SCREAMING_SNAKE_CASE_ ,
            padding=SCREAMING_SNAKE_CASE_ ,
            max_length=SCREAMING_SNAKE_CASE_ ,
            truncation=SCREAMING_SNAKE_CASE_ ,
            pad_to_multiple_of=SCREAMING_SNAKE_CASE_ ,
            return_attention_mask=SCREAMING_SNAKE_CASE_ ,
            **SCREAMING_SNAKE_CASE_ ,
        )
        # make sure list is in array format
        A: List[str] = padded_inputs.get('''input_features''' )
        if isinstance(input_features[0] , SCREAMING_SNAKE_CASE_ ):
            A: Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_features]
        A: List[Any] = padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            A: Dict = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            A: Dict = (
                np.array(SCREAMING_SNAKE_CASE_ , dtype=np.intaa )
                if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            A: List[Any] = self.normalize(
                padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE_ )
        if return_tensors is not None:
            A: Dict = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
        return padded_inputs
334
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

UpperCamelCase = {
    '''configuration_mask2former''': [
        '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Mask2FormerConfig''',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''Mask2FormerImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Mask2FormerForUniversalSegmentation''',
        '''Mask2FormerModel''',
        '''Mask2FormerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_maskaformer import MaskaFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskaformer import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskaFormerForUniversalSegmentation,
            MaskaFormerModel,
            MaskaFormerPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
334
'''simple docstring'''
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''
    UpperCamelCase_ : Optional[Any] = DebertaTokenizer
    UpperCamelCase_ : List[str] = True
    UpperCamelCase_ : int = DebertaTokenizerFast

    def _snake_case ( self : Optional[int] ) -> Dict:
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        A: Optional[int] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''']
        A: int = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        A: Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        A: Union[str, Any] = {'''unk_token''': '''[UNK]'''}
        A: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        A: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )

    def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]:
        '''simple docstring'''
        A: Optional[int] = '''lower newer'''
        A: str = '''lower newer'''
        return input_text, output_text

    def _snake_case ( self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        A: str = self.get_tokenizer()
        A: Any = '''lower newer'''
        A: Dict = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        A: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        A: List[Any] = tokens + [tokenizer.unk_token]
        A: int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : List[Any] ) -> Any:
        '''simple docstring'''
        A: str = self.get_tokenizer()
        A: List[str] = tokenizer('''Hello''' , '''World''' )
        A: Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , SCREAMING_SNAKE_CASE_ )

    @slow
    def _snake_case ( self : Tuple ) -> Optional[int]:
        '''simple docstring'''
        A: Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        A: Any = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        A: Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        A: Dict = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
        A: Dict = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
        A: List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
        A: int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def _snake_case ( self : Tuple ) -> Dict:
        '''simple docstring'''
        A: int = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            A: List[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            A: Dict = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            A: Dict = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
            A: Any = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) for seq in encoding['''input_ids''']]
            # fmt: off
            A: Any = {
                '''input_ids''': [
                    [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            A: Optional[int] = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            self.assertDictEqual(encoding.data , SCREAMING_SNAKE_CASE_ )
            for expected, decoded in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
334
1
'''simple docstring'''
import unittest

from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin

UpperCamelCase = '''▁'''
UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')


@require_sentencepiece
class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''
    UpperCamelCase_ : Union[str, Any] = BertGenerationTokenizer
    UpperCamelCase_ : Optional[int] = False
    UpperCamelCase_ : Tuple = True

    def _snake_case ( self : Any ) -> Tuple:
        '''simple docstring'''
        super().setUp()
        A: str = BertGenerationTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def _snake_case ( self : List[str] ) -> List[Any]:
        '''simple docstring'''
        A: Tuple = '''<s>'''
        A: int = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Tuple ) -> int:
        '''simple docstring'''
        A: Dict = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''<pad>''' )
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 10_02 )

    def _snake_case ( self : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )

    def _snake_case ( self : Tuple ) -> Optional[int]:
        '''simple docstring'''
        A: str = BertGenerationTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
        A: Union[str, Any] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) ,
            [2_85, 46, 10, 1_70, 3_82] ,
        )
        A: Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ ,
            [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] ,
        )
        A: List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ ,
            [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,
        )
        A: Union[str, Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ ,
            [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] ,
        )

    @cached_property
    def _snake_case ( self : Optional[Any] ) -> List[str]:
        '''simple docstring'''
        return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )

    @slow
    def _snake_case ( self : Any ) -> Optional[int]:
        '''simple docstring'''
        A: Dict = '''Hello World!'''
        A: int = [1_85_36, 22_60, 1_01]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )

    @slow
    def _snake_case ( self : Tuple ) -> Union[str, Any]:
        '''simple docstring'''
        A: List[Any] = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        A: List[Any] = [8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )

    @require_torch
    @slow
    def _snake_case ( self : str ) -> Union[str, Any]:
        '''simple docstring'''
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        A: List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
        A: Any = ''' '''.join(SCREAMING_SNAKE_CASE_ )
        A: Optional[Any] = self.big_tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
        A: List[Any] = self.big_tokenizer.batch_encode_plus(
            [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
        A: Dict = BertGenerationConfig()
        A: Tuple = BertGenerationEncoder(SCREAMING_SNAKE_CASE_ )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**SCREAMING_SNAKE_CASE_ )
            model(**SCREAMING_SNAKE_CASE_ )

    @slow
    def _snake_case ( self : Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        A: Optional[int] = {'''input_ids''': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_ ,
            model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' ,
            revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' ,
        )
334
'''simple docstring'''
import requests

UpperCamelCase = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''


def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
    # fetching a list of articles in json format
    A: Tuple = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
        print(f"""{i}.) {article['title']}""" )


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
334
1
'''simple docstring'''


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int:
    if len(__lowercase ) != len(__lowercase ):
        raise ValueError('''String lengths must match!''' )
    A: str = 0
    for chara, chara in zip(__lowercase , __lowercase ):
        if chara != chara:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
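# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original snippet): once the collapsed loop
# variables above are repaired, Hamming distance is simply the count of
# positions where the two strings disagree; the one-liner below shows the
# intended behaviour on a classic example.
assert sum(a != b for a, b in zip("karolin", "kathrin")) == 3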
334
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    UpperCamelCase = None

UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

UpperCamelCase = {
    '''vocab_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
    },
    '''tokenizer_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
    },
}

UpperCamelCase = {
    '''camembert-base''': 512,
}

UpperCamelCase = '''▁'''


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''
    UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
    UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : str = ["""input_ids""", """attention_mask"""]
    UpperCamelCase_ : int = CamembertTokenizer

    def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : int="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE_ : str="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : Any , ) -> Any:
        '''simple docstring'''
        A: Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
        super().__init__(
            SCREAMING_SNAKE_CASE_ ,
            tokenizer_file=SCREAMING_SNAKE_CASE_ ,
            bos_token=SCREAMING_SNAKE_CASE_ ,
            eos_token=SCREAMING_SNAKE_CASE_ ,
            sep_token=SCREAMING_SNAKE_CASE_ ,
            cls_token=SCREAMING_SNAKE_CASE_ ,
            unk_token=SCREAMING_SNAKE_CASE_ ,
            pad_token=SCREAMING_SNAKE_CASE_ ,
            mask_token=SCREAMING_SNAKE_CASE_ ,
            additional_special_tokens=SCREAMING_SNAKE_CASE_ ,
            **SCREAMING_SNAKE_CASE_ ,
        )
        A: Any = vocab_file
        A: Any = False if not self.vocab_file else True

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A: List[str] = [self.cls_token_id]
        A: List[str] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        A: List[str] = [self.sep_token_id]
        A: Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        A: Dict = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
        return (out_vocab_file,)
334
1
'''simple docstring'''
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
}

UpperCamelCase = {
    '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
    '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}

UpperCamelCase = {
    '''ctrl''': 256,
}

UpperCamelCase = {
    '''Pregnancy''': 168629, '''Christianity''': 7675, '''Explain''': 106423, '''Fitness''': 63440, '''Saving''': 63163,
    '''Ask''': 27171, '''Ass''': 95985, '''Joke''': 163509, '''Questions''': 45622, '''Thoughts''': 49605,
    '''Retail''': 52342, '''Feminism''': 164338, '''Writing''': 11992, '''Atheism''': 192263, '''Netflix''': 48616,
    '''Computing''': 39639, '''Opinion''': 43213, '''Alone''': 44967, '''Funny''': 58917, '''Gaming''': 40358,
    '''Human''': 4088, '''India''': 1331, '''Joker''': 77138, '''Diet''': 36206, '''Legal''': 11859,
    '''Norman''': 4939, '''Tip''': 72689, '''Weight''': 52343, '''Movies''': 46273, '''Running''': 23425,
    '''Science''': 2090, '''Horror''': 37793, '''Confession''': 60572, '''Finance''': 12250, '''Politics''': 16360,
    '''Scary''': 191985, '''Support''': 12654, '''Technologies''': 32516, '''Teenage''': 66160, '''Event''': 32769,
    '''Learned''': 67460, '''Notion''': 182770, '''Wikipedia''': 37583, '''Books''': 6665, '''Extract''': 76050,
    '''Confessions''': 102701, '''Conspiracy''': 75932, '''Links''': 63674, '''Narcissus''': 150425,
    '''Relationship''': 54766, '''Relationships''': 134796, '''Reviews''': 41671, '''News''': 4256,
    '''Translation''': 26820, '''multilingual''': 128406,
}


def SCREAMING_SNAKE_CASE( __lowercase ) -> Union[str, Any]:
    A: Dict = set()
    A: Optional[Any] = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        A: Optional[int] = char
    A: Union[str, Any] = set(__lowercase )
    return pairs


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''
    UpperCamelCase_ : str = VOCAB_FILES_NAMES
    UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : Optional[Any] = CONTROL_CODES

    def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any="<unk>" , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict:
        '''simple docstring'''
        super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
            A: int = json.load(SCREAMING_SNAKE_CASE_ )
        A: str = {v: k for k, v in self.encoder.items()}
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
            A: List[Any] = merges_handle.read().split('''\n''' )[1:-1]
        A: Any = [tuple(merge.split() ) for merge in merges]
        A: Optional[int] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        A: Any = {}

    @property
    def _snake_case ( self : int ) -> Optional[Any]:
        '''simple docstring'''
        return len(self.encoder )

    def _snake_case ( self : List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )

    def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any ) -> Tuple:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ )
        A: str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        A: int = get_pairs(SCREAMING_SNAKE_CASE_ )
        if not pairs:
            return token
        while True:
            A: Union[str, Any] = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            A , A: Any = bigram
            A: Optional[int] = []
            A: Dict = 0
            while i < len(SCREAMING_SNAKE_CASE_ ):
                try:
                    A: Optional[Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    A: List[Any] = j
                if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            A: Optional[int] = tuple(SCREAMING_SNAKE_CASE_ )
            A: List[Any] = new_word
            if len(SCREAMING_SNAKE_CASE_ ) == 1:
                break
            else:
                A: Tuple = get_pairs(SCREAMING_SNAKE_CASE_ )
        A: Optional[int] = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
        A: Any = word[:-4]
        A: Tuple = word
        return word

    def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        A: int = []
        A: Any = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
        for token in words:
            split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
        return split_tokens

    def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]:
        '''simple docstring'''
        return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )

    def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict:
        '''simple docstring'''
        return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )

    def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        A: Tuple = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
        return out_string

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        A: Optional[Any] = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        A: Any = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        A: Optional[int] = 0
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    A: Optional[Any] = token_index
                writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
                index += 1
        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
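# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original snippet): the `get_pairs` helper
# above feeds the BPE loop, which repeatedly merges the adjacent symbol pair
# with the lowest merge rank. `adjacent_pairs` is an illustrative standalone
# re-implementation of that helper.
def adjacent_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}


assert adjacent_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}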
334
'''simple docstring'''
import os
from distutils.util import strtobool


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[Any]:
    for e in env_keys:
        A: Dict = int(os.environ.get(__lowercase , -1 ) )
        if val >= 0:
            return val
    return default


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False ) -> List[str]:
    A: str = os.environ.get(__lowercase , str(__lowercase ) )
    return strtobool(__lowercase ) == 1  # As its name indicates `strtobool` actually returns an int...


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase="no" ) -> str:
    A: Optional[int] = os.environ.get(__lowercase , str(__lowercase ) )
    return value
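# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original snippet): the flag parser above
# reduces to `strtobool` applied to an environment variable. `DEMO_FLAG` is a
# made-up variable name for this example only; note `distutils` is deprecated
# on recent Pythons, matching the import used above.
import os
from distutils.util import strtobool

os.environ["DEMO_FLAG"] = "true"
assert strtobool(os.environ.get("DEMO_FLAG", "false")) == 1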
334
1
'''simple docstring'''
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''
    UpperCamelCase_ : int = (PNDMScheduler,)
    UpperCamelCase_ : List[Any] = (("""num_inference_steps""", 50),)

    def _snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Any:
        '''simple docstring'''
        A: Optional[Any] = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**SCREAMING_SNAKE_CASE_ )
        return config

    def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : int=0 , **SCREAMING_SNAKE_CASE_ : Dict ) -> List[str]:
        '''simple docstring'''
        A: List[str] = dict(self.forward_default_kwargs )
        A: Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
        A: int = self.dummy_sample
        A: Optional[int] = 0.1 * sample
        A: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            A: List[str] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
            A: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
            # copy over dummy past residuals
            A: int = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE_ )
                A: Optional[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
                # copy over dummy past residuals
                A: Union[str, Any] = dummy_past_residuals[:]
            A: Any = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            A: Union[str, Any] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            A: Union[str, Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            A: Tuple = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _snake_case ( self : Union[str, Any] ) -> Tuple:
        '''simple docstring'''
        pass

    def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        A: Union[str, Any] = dict(self.forward_default_kwargs )
        A: Optional[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
        A: Optional[Any] = self.dummy_sample
        A: Tuple = 0.1 * sample
        A: List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            A: Union[str, Any] = self.get_scheduler_config()
            A: Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
            # copy over dummy past residuals (must be after setting timesteps)
            A: str = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE_ )
                A: List[str] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
                # copy over dummy past residual (must be after setting timesteps)
                A: str = dummy_past_residuals[:]
            A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            A: Any = new_scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            A: int = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            A: Union[str, Any] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _snake_case ( self : List[Any] , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        A: List[Any] = self.scheduler_classes[0]
        A: Any = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
        A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
        A: List[str] = 10
        A: str = self.dummy_model()
        A: str = self.dummy_sample_deter
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
        for i, t in enumerate(scheduler.prk_timesteps ):
            A: Any = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            A: Dict = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            A: Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
        return sample

    def _snake_case ( self : Tuple ) -> Optional[Any]:
        '''simple docstring'''
        A: List[str] = dict(self.forward_default_kwargs )
        A: Optional[int] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE_ )
        for scheduler_class in self.scheduler_classes:
            A: Optional[int] = self.get_scheduler_config()
            A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
            A: List[str] = self.dummy_sample
            A: Tuple = 0.1 * sample
            if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
                scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
            elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE_ , '''set_timesteps''' ):
                A: Dict = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            A: Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            A: Dict = dummy_past_residuals[:]
            A: List[Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            A: Union[str, Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            A: Optional[int] = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            A: Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE_ , 1 , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _snake_case ( self : Any ) -> Dict:
        '''simple docstring'''
        for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Union[str, Any] ) -> Any:
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
        A: Optional[int] = self.scheduler_classes[0]
        A: List[str] = self.get_scheduler_config(steps_offset=1 )
        A: int = scheduler_class(**SCREAMING_SNAKE_CASE_ )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps ,
            torch.LongTensor(
                [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) ,
        )

    def _snake_case ( self : Any ) -> List[Any]:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : List[str] ) -> List[str]:
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : List[Any] ) -> List[str]:
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : int ) -> str:
        '''simple docstring'''
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : List[Any] ) -> Any:
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        A: Union[str, Any] = 27
        for scheduler_class in self.scheduler_classes:
            A: int = self.dummy_sample
            A: List[Any] = 0.1 * sample
            A: str = self.get_scheduler_config()
            A: List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                A: str = scheduler.step_prk(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample

    def _snake_case ( self : int ) -> str:
        '''simple docstring'''
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            A: List[str] = self.scheduler_classes[0]
            A: Tuple = self.get_scheduler_config()
            A: List[str] = scheduler_class(**SCREAMING_SNAKE_CASE_ )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample

    def _snake_case ( self : int ) -> Tuple:
        '''simple docstring'''
        A: Optional[int] = self.full_loop()
        A: int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A: Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 198.1318 ) < 1E-2
        assert abs(result_mean.item() - 0.2580 ) < 1E-3

    def _snake_case ( self : Dict ) -> Tuple:
        '''simple docstring'''
        A: List[str] = self.full_loop(prediction_type='''v_prediction''' )
        A: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A: str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 67.3986 ) < 1E-2
        assert abs(result_mean.item() - 0.0878 ) < 1E-3

    def _snake_case ( self : List[str] ) -> List[str]:
        '''simple docstring'''
        A: str = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
        A: str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A: Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 230.0399 ) < 1E-2
        assert abs(result_mean.item() - 0.2995 ) < 1E-3

    def _snake_case ( self : Union[str, Any] ) -> int:
        '''simple docstring'''
        A: int = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
        A: List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A: int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 186.9482 ) < 1E-2
        assert abs(result_mean.item() - 0.2434 ) < 1E-3
334
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() UpperCamelCase = logging.get_logger('''transformers.models.encodec''') UpperCamelCase = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } UpperCamelCase = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } UpperCamelCase = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': 
'''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } UpperCamelCase = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } UpperCamelCase = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } UpperCamelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } UpperCamelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } UpperCamelCase = [] UpperCamelCase = [] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict: for attribute in key.split('''.''' ): A: Union[str, Any] = getattr(__lowercase , __lowercase ) if weight_type is not None: A: Tuple = getattr(__lowercase , __lowercase ).shape else: A: str = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": A: Dict = value elif weight_type == "weight_g": A: Tuple = value elif weight_type == "weight_v": A: Any = value elif weight_type == "bias": A: str = value elif weight_type == "running_mean": A: List[Any] = value elif weight_type == "running_var": A: Dict = value elif weight_type == "num_batches_tracked": A: List[str] = value elif weight_type == "weight_ih_l0": A: Dict = value elif weight_type == "weight_hh_l0": A: Optional[int] = value elif weight_type == "bias_ih_l0": A: List[Any] = value elif weight_type == "bias_hh_l0": A: str = value elif weight_type == "weight_ih_l1": A: Optional[int] = value elif weight_type == "weight_hh_l1": A: int = value elif weight_type == "bias_ih_l1": A: Optional[Any] = value elif weight_type == "bias_hh_l1": A: str = value else: A: Optional[int] = value logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: A , A: Any = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: A: Any = [] if model_name == "encodec_24khz" or "encodec_32khz": A: List[str] = MAPPING_24K elif model_name == "encodec_48khz": A: List[Any] = MAPPING_48K else: raise ValueError(F"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(__lowercase , __lowercase ): logger.info(F"""{name} was ignored""" ) continue A: Optional[int] = False for key, mapped_key in MAPPING.items(): if "*" in key: A , A: Optional[int] = key.split('''.*.''' ) if prefix in name and suffix in name: A: str = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue A: Optional[Any] = True if "*" in mapped_key: A: Any = name.split(__lowercase )[0].split('''.''' )[-2] A: Tuple = mapped_key.replace('''*''' , __lowercase ) if "weight_g" in name: A: str = '''weight_g''' elif "weight_v" in name: A: List[Any] = '''weight_v''' elif "weight_ih_l0" in name: A: Dict = '''weight_ih_l0''' elif "weight_hh_l0" in name: A: int = '''weight_hh_l0''' elif "bias_ih_l0" in name: A: Union[str, Any] = '''bias_ih_l0''' elif "bias_hh_l0" in name: A: Tuple = '''bias_hh_l0''' elif "weight_ih_l1" in name: A: int = '''weight_ih_l1''' elif "weight_hh_l1" in name: A: Optional[Any] = '''weight_hh_l1''' elif "bias_ih_l1" in name: A: Dict = '''bias_ih_l1''' elif "bias_hh_l1" in name: A: str = '''bias_hh_l1''' elif "bias" in name: A: Union[str, Any] = '''bias''' elif "weight" in name: A: Dict = '''weight''' elif "running_mean" in name: A: Tuple = '''running_mean''' elif "running_var" in name: A: Any = '''running_var''' elif "num_batches_tracked" in name: A: str = '''num_batches_tracked''' else: A: Tuple = None set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) continue if not is_used: unused_weights.append(__lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ) -> Dict: if config_path is not None: A: Tuple = 
EncodecConfig.from_pretrained(__lowercase ) else: A: Union[str, Any] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A: Union[str, Any] = [8, 5, 4, 4] A: Dict = [2.2] A: List[Any] = 6_4 A: Optional[Any] = 3_2_0_0_0 A: List[Any] = 2_0_4_8 A: Optional[Any] = False A: int = False A: Union[str, Any] = False elif model_name == "encodec_48khz": A: Optional[int] = [8, 5, 4, 2] A: List[Any] = [3.0, 6.0, 1_2.0, 2_4.0] A: List[Any] = 4_8_0_0_0 A: int = 2 A: List[Any] = False A: Any = '''time_group_norm''' A: Optional[Any] = True A: Any = 1.0 A: Any = 0.0_1 else: raise ValueError(F"""Unknown model name: {model_name}""" ) A: str = EncodecModel(__lowercase ) A: Optional[Any] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(__lowercase ) A: Union[str, Any] = torch.load(__lowercase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A: Optional[int] = original_checkpoint['''best_state'''] recursively_load_weights(__lowercase , __lowercase , __lowercase ) model.save_pretrained(__lowercase ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(__lowercase ) model.push_to_hub(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
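# A minimal usage sketch for the converter above, mirroring the positional
# call made in __main__; the checkpoint file and output directory below are
# placeholder paths, not files that ship with the script.
# convert_checkpoint("encodec_24khz", "encodec_24khz.th", "./encodec_24khz_hf", None, None)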
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

UpperCamelCase = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''PLBartTokenizer''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PLBartForCausalLM''',
        '''PLBartForConditionalGeneration''',
        '''PLBartForSequenceClassification''',
        '''PLBartModel''',
        '''PLBartPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

UpperCamelCase = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''VisionEncoderDecoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''TFVisionEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple:
    A: Tuple = len(__lowercase )
    for i in range(length - 1 ):
        A: Dict = i
        for k in range(i + 1 , __lowercase ):
            if collection[k] < collection[least]:
                A: List[str] = k
        if least != i:
            A , A: Tuple = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCamelCase = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
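# A readable, self-contained restatement of the selection-sort loop above:
# on each pass, the smallest element of the unsorted suffix is swapped into
# position i. The sample input is illustrative.
def selection_sort_sketch(collection: list) -> list:
    for i in range(len(collection) - 1):
        least = i
        for k in range(i + 1, len(collection)):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = collection[i], collection[least]
    return collection


print(selection_sort_sketch([64, 25, 12, 22, 11]))  # [11, 12, 22, 25, 64]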
'''simple docstring''' import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging UpperCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: A: Tuple = set() A: int = [] def parse_line(__lowercase ): for line in fp: if isinstance(__lowercase , __lowercase ): A: List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(__lowercase ) > 0: A: str = '''\n'''.join(__lowercase ) # Only keep the warnings specified in `targets` if any(F""": {x}: """ in warning for x in targets ): selected_warnings.add(__lowercase ) buffer.clear() continue else: A: Union[str, Any] = line.strip() buffer.append(__lowercase ) if from_gh: for filename in os.listdir(__lowercase ): A: int = os.path.join(__lowercase , __lowercase ) if not os.path.isdir(__lowercase ): # read the file if filename != "warnings.txt": continue with open(__lowercase ) as fp: parse_line(__lowercase ) else: try: with zipfile.ZipFile(__lowercase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowercase ): # read the file if filename != "warnings.txt": continue with z.open(__lowercase ) as fp: parse_line(__lowercase ) except Exception: logger.warning( F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" ) return selected_warnings def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any: A: Union[str, Any] = set() A: Optional[int] = [os.path.join(__lowercase , __lowercase ) for p in os.listdir(__lowercase ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(__lowercase , __lowercase ) ) return selected_warnings if __name__ == "__main__": def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: return values.split(''',''' ) UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') # optional parameters parser.add_argument( '''--targets''', default='''DeprecationWarning,UserWarning,FutureWarning''', type=list_str, help='''Comma-separated list of target warning(s) which we want to extract.''', ) parser.add_argument( '''--from_gh''', action='''store_true''', help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links UpperCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('''=''' * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to 
GitHub time.sleep(1) # extract warnings from artifacts UpperCamelCase = extract_warnings(args.output_dir, args.targets) UpperCamelCase = sorted(selected_warnings) with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
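# A minimal sketch of the filter parse_line applies above: a warning block is
# kept only when one of the --targets names appears between colons, as in a
# pytest warnings-summary line. The sample line is illustrative, not taken
# from a real log.
sample_warning = "tests/test_x.py:10: DeprecationWarning: this API is deprecated"
targets = ["DeprecationWarning", "UserWarning", "FutureWarning"]
print(any(f": {t}: " in sample_warning for t in targets))  # True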
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES UpperCamelCase_ : Union[str, Any] = 
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = DPRContextEncoderTokenizer class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Dict = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Optional[int] = DPRQuestionEncoderTokenizer UpperCamelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) UpperCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) UpperCamelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ : '''simple docstring''' def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) elif titles is None or texts is None: A: Union[str, Any] = titles if texts is None else texts return super().__call__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Union[str, Any] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles] A: Optional[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts] A: str = len(SCREAMING_SNAKE_CASE_ ) A: List[Any] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages assert len(SCREAMING_SNAKE_CASE_ ) == len( SCREAMING_SNAKE_CASE_ ), f"""There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts.""" A: Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: Dict = super().__call__(SCREAMING_SNAKE_CASE_ , 
add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] } if return_attention_mask is not False: A: Union[str, Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) A: Optional[Any] = attention_mask return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : BatchEncoding , SCREAMING_SNAKE_CASE_ : DPRReaderOutput , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : int = 64 , SCREAMING_SNAKE_CASE_ : int = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Any = reader_input['''input_ids'''] A , A , A: str = reader_output[:3] A: str = len(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ ) A: List[DPRReaderOutput] = [] for doc_id in sorted_docs: A: List[str] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence A: Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A: Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: A: int = len(SCREAMING_SNAKE_CASE_ ) A: Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(SCREAMING_SNAKE_CASE_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Union[str, Any] = [] for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) A: Any = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ ) A: Dict = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A: int = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals 
): continue chosen_span_intervals.append((start_index, end_index) ) if len(SCREAMING_SNAKE_CASE_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Dict = READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : Optional[Any] = DPRReaderTokenizer
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } UpperCamelCase = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } UpperCamelCase = {'''facebook/blenderbot-3B''': 128} class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Dict = VOCAB_FILES_NAMES UpperCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[int] = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : Union[str, Any] = BlenderbotTokenizer def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Dict="replace" , SCREAMING_SNAKE_CASE_ : List[str]="<s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : Tuple="<s>" , SCREAMING_SNAKE_CASE_ : Any="<unk>" , SCREAMING_SNAKE_CASE_ : List[str]="<pad>" , SCREAMING_SNAKE_CASE_ : int="<mask>" , SCREAMING_SNAKE_CASE_ : List[str]=False , SCREAMING_SNAKE_CASE_ : Any=True , **SCREAMING_SNAKE_CASE_ : Dict , ) -> Any: '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: A: Union[str, Any] = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('''type''' ) ) A: str = add_prefix_space A: Tuple = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) A: Optional[int] = add_prefix_space A: int = '''post_processor''' A: Tuple = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: A: Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A: Tuple = tuple(state['''sep'''] ) if "cls" in state: A: Union[str, Any] = tuple(state['''cls'''] ) A: Union[str, Any] = False if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: A: str = add_prefix_space A: str = True if 
state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE_ ) != trim_offsets: A: Dict = trim_offsets A: List[str] = True if changes_to_apply: A: str = getattr(SCREAMING_SNAKE_CASE_ , state.pop('''type''' ) ) A: Dict = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case ( self : Any ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int: '''simple docstring''' A: Optional[int] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else value A: List[Any] = value def _snake_case ( self : str , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> BatchEncoding: '''simple docstring''' A: int = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> BatchEncoding: '''simple docstring''' A: Optional[Any] = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' A: Union[str, Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: Any = [self.sep_token_id] A: Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> str: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : "Conversation" ) -> List[int]: '''simple docstring''' A: Any = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. 
inputs.append(SCREAMING_SNAKE_CASE_ ) A: int = ''' '''.join(SCREAMING_SNAKE_CASE_ ) A: Dict = self.encode(SCREAMING_SNAKE_CASE_ ) if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length: A: Any = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

UpperCamelCase = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''GPTSw3Tokenizer''']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets UpperCamelCase = ''' IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. ''' UpperCamelCase = ''' Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric("mean_iou") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])} ''' UpperCamelCase = '''\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }''' def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = False , ) -> Optional[int]: if label_map is not None: for old_id, new_id in label_map.items(): A: List[Any] = new_id # turn into Numpy arrays A: Union[str, Any] = np.array(__lowercase ) A: Union[str, Any] = np.array(__lowercase ) if reduce_labels: A: List[Any] = 2_5_5 A: Union[str, Any] = label - 1 A: Optional[Any] = 2_5_5 A: Optional[int] = label != ignore_index A: Optional[int] = np.not_equal(__lowercase , __lowercase ) A: Optional[int] = pred_label[mask] A: Optional[int] = np.array(__lowercase )[mask] A: List[Any] = pred_label[pred_label == label] A: int = np.histogram(__lowercase , bins=__lowercase , range=(0, num_labels - 1) )[0] A: List[str] = np.histogram(__lowercase , bins=__lowercase , range=(0, num_labels - 1) )[0] A: str = np.histogram(__lowercase , bins=__lowercase , range=(0, num_labels - 1) )[0] A: Dict = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = False , ) -> List[str]: A: Optional[Any] = np.zeros((num_labels,) , dtype=np.floataa ) A: str = np.zeros((num_labels,) , dtype=np.floataa ) A: Dict = np.zeros((num_labels,) , dtype=np.floataa ) A: Optional[Any] = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(__lowercase , __lowercase ): A , A , A , A: Union[str, Any] = intersect_and_union( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ) -> List[Any]: A , A , A , A: List[str] = total_intersect_and_union( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) # compute metrics A: Optional[int] = {} A: Optional[Any] = total_area_intersect.sum() / total_area_label.sum() A: List[Any] = total_area_intersect / total_area_union A: Union[str, Any] = total_area_intersect / total_area_label A: List[Any] = np.nanmean(__lowercase ) A: Dict = np.nanmean(__lowercase ) A: List[str] = all_acc A: List[str] = iou A: List[str] = acc if nan_to_num is not None: A: str = {metric: np.nan_to_num(__lowercase , nan=__lowercase ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _snake_case ( self : Any ) -> str: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': 
datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Dict[int, int]] = None , SCREAMING_SNAKE_CASE_ : bool = False , ) -> List[Any]: '''simple docstring''' A: Dict = mean_iou( results=SCREAMING_SNAKE_CASE_ , gt_seg_maps=SCREAMING_SNAKE_CASE_ , num_labels=SCREAMING_SNAKE_CASE_ , ignore_index=SCREAMING_SNAKE_CASE_ , nan_to_num=SCREAMING_SNAKE_CASE_ , label_map=SCREAMING_SNAKE_CASE_ , reduce_labels=SCREAMING_SNAKE_CASE_ , ) return iou_result
'''simple docstring'''
from __future__ import annotations

from typing import Any


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''
    pass


class lowerCAmelCase_ :
    '''simple docstring'''

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> None:
        '''simple docstring'''
        A: Any = data
        A: Node | None = None

    def __iter__( self : Optional[int] ) -> List[str]:
        '''simple docstring'''
        A: List[str] = self
        A: Dict = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(SCREAMING_SNAKE_CASE_ )
            yield node.data
            A: str = node.next_node

    @property
    def _snake_case ( self : List[str] ) -> bool:
        '''simple docstring'''
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    UpperCamelCase = Node(1)
    UpperCamelCase = Node(2)
    UpperCamelCase = Node(3)
    UpperCamelCase = Node(4)
    print(root_node.has_loop)  # False
    UpperCamelCase = root_node.next_node
    print(root_node.has_loop)  # True

    UpperCamelCase = Node(5)
    UpperCamelCase = Node(6)
    UpperCamelCase = Node(5)
    UpperCamelCase = Node(6)
    print(root_node.has_loop)  # False

    UpperCamelCase = Node(1)
    print(root_node.has_loop)  # False
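# The has_loop property above remembers every visited node, which costs O(n)
# extra memory. A constant-space alternative is Floyd's tortoise-and-hare
# cycle detection; the sketch below is self-contained and uses its own minimal
# node type rather than the class above.
class _Node:
    def __init__(self, data):
        self.data = data
        self.next_node = None


def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False


if __name__ == "__main__":
    a, b, c = _Node(1), _Node(2), _Node(3)
    a.next_node, b.next_node = b, c
    print(has_loop_floyd(a))  # False
    c.next_node = a
    print(has_loop_floyd(a))  # True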
'''simple docstring''' import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase = '''src/diffusers''' UpperCamelCase = '''.''' # This is to make sure the diffusers module imported is the one in the repo. UpperCamelCase = importlib.util.spec_from_file_location( '''diffusers''', os.path.join(DIFFUSERS_PATH, '''__init__.py'''), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCamelCase = spec.loader.load_module() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str: return line.startswith(__lowercase ) or len(__lowercase ) <= 1 or re.search(r'''^\s*\)(\s*->.*:|:)\s*$''' , __lowercase ) is not None def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: A: int = object_name.split('''.''' ) A: Optional[Any] = 0 # First let's find the module where our object lives. A: Union[str, Any] = parts[i] while i < len(__lowercase ) and not os.path.isfile(os.path.join(__lowercase , F"""{module}.py""" ) ): i += 1 if i < len(__lowercase ): A: Tuple = os.path.join(__lowercase , parts[i] ) if i >= len(__lowercase ): raise ValueError(F"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(__lowercase , F"""{module}.py""" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A: List[str] = f.readlines() # Now let's find the class / func in the code! A: Optional[Any] = '''''' A: List[Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(__lowercase ) and re.search(rF"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(__lowercase ): raise ValueError(F""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). A: Optional[Any] = line_index while line_index < len(__lowercase ) and _should_continue(lines[line_index] , __lowercase ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 A: Optional[Any] = lines[start_index:line_index] return "".join(__lowercase ) UpperCamelCase = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''') UpperCamelCase = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''') UpperCamelCase = re.compile(R'''<FILL\s+[^>]*>''') def SCREAMING_SNAKE_CASE( __lowercase ) -> int: A: Union[str, Any] = code.split('''\n''' ) A: int = 0 while idx < len(__lowercase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(__lowercase ): return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def SCREAMING_SNAKE_CASE( __lowercase ) -> Union[str, Any]: A: int = len(get_indent(__lowercase ) ) > 0 if has_indent: A: List[str] = F"""class Bla:\n{code}""" A: List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=__lowercase ) A: int = black.format_str(__lowercase , mode=__lowercase ) A , A: List[Any] = style_docstrings_in_code(__lowercase ) return result[len('''class Bla:\n''' ) :] if has_indent else result def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False ) -> str: with open(__lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A: List[Any] = f.readlines() A: Tuple = [] A: Dict = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(__lowercase ): A: Tuple = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. A , A , A: Any = search.groups() A: List[Any] = find_code_in_diffusers(__lowercase ) A: Tuple = get_indent(__lowercase ) A: List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 A: Dict = theoretical_indent A: Optional[Any] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. A: Tuple = True while line_index < len(__lowercase ) and should_continue: line_index += 1 if line_index >= len(__lowercase ): break A: Optional[Any] = lines[line_index] A: int = _should_continue(__lowercase , __lowercase ) and re.search(F"""^{indent}# End copy""" , __lowercase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 A: Any = lines[start_index:line_index] A: Dict = ''''''.join(__lowercase ) # Remove any nested `Copied from` comments to avoid circular copies A: Any = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(__lowercase ) is None] A: Union[str, Any] = '''\n'''.join(__lowercase ) # Before comparing, use the `replace_pattern` on the original code. if len(__lowercase ) > 0: A: Optional[Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) A: List[str] = [_re_replace_pattern.search(__lowercase ) for p in patterns] for pattern in patterns: if pattern is None: continue A , A , A: List[str] = pattern.groups() A: Tuple = re.sub(__lowercase , __lowercase , __lowercase ) if option.strip() == "all-casing": A: str = re.sub(obja.lower() , obja.lower() , __lowercase ) A: Dict = re.sub(obja.upper() , obja.upper() , __lowercase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line A: Any = blackify(lines[start_index - 1] + theoretical_code ) A: Any = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. 
if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: A: Tuple = lines[:start_index] + [theoretical_code] + lines[line_index:] A: str = start_index + 1 if overwrite and len(__lowercase ) > 0: # Warn the user a file has been modified. print(F"""Detected changes, rewriting {filename}.""" ) with open(__lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(__lowercase ) return diffs def SCREAMING_SNAKE_CASE( __lowercase = False ) -> Any: A: Dict = glob.glob(os.path.join(__lowercase , '''**/*.py''' ) , recursive=__lowercase ) A: Tuple = [] for filename in all_files: A: List[Any] = is_copy_consistent(__lowercase , __lowercase ) diffs += [F"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(__lowercase ) > 0: A: Tuple = '''\n'''.join(__lowercase ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') UpperCamelCase = parser.parse_args() check_copies(args.fix_and_overwrite)
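# A minimal sketch of the comment format the regexes above consume; the object
# path and replace pattern in the sample line are illustrative, not real
# diffusers symbols.
import re

copy_re = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
sample = "    # Copied from diffusers.models.foo.Bar with Bar->Baz"
print(copy_re.search(sample).groups())
# ('    ', 'models.foo.Bar', 'with Bar->Baz')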
'''simple docstring'''
from __future__ import annotations


def SCREAMING_SNAKE_CASE( __lowercase = 4 ) -> list[list[int]]:
    A: Tuple = abs(__lowercase ) or 4
    return [[1 + x + y * row_size for x in range(__lowercase )] for y in range(__lowercase )]


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]:
    return reverse_row(transpose(__lowercase ) )  # OR.. transpose(reverse_column(matrix))


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]:
    return reverse_row(reverse_column(__lowercase ) )  # OR.. reverse_column(reverse_row(matrix))


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]:
    return reverse_column(transpose(__lowercase ) )  # OR.. transpose(reverse_row(matrix))


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]:
    A: Union[str, Any] = [list(__lowercase ) for x in zip(*__lowercase )]
    return matrix


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]:
    A: Optional[int] = matrix[::-1]
    return matrix


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]:
    A: Optional[Any] = [x[::-1] for x in matrix]
    return matrix


def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
    for i in matrix:
        print(*__lowercase )


if __name__ == "__main__":
    UpperCamelCase = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_aa(matrix))

    UpperCamelCase = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_aaa(matrix))

    UpperCamelCase = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_aaa(matrix))
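# A quick check of the identity the rotations above rely on: a 90 degree
# counterclockwise rotation is reverse_row(transpose(matrix)).
m = [[1, 2], [3, 4]]
transposed = [list(col) for col in zip(*m)]  # [[1, 3], [2, 4]]
print(transposed[::-1])                      # [[2, 4], [1, 3]]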
'''simple docstring'''
import random


def SCREAMING_SNAKE_CASE( __lowercase ) -> bool:
    A: List[str] = num - 1
    A: Any = 0
    while s % 2 == 0:
        A: str = s // 2
        t += 1
    for _ in range(5 ):
        A: List[Any] = random.randrange(2 , num - 1 )
        A: List[str] = pow(__lowercase , __lowercase , __lowercase )
        if v != 1:
            A: Optional[int] = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    A: Optional[Any] = i + 1
                    A: Union[str, Any] = (v**2) % num
    return True


def SCREAMING_SNAKE_CASE( __lowercase ) -> bool:
    if num < 2:
        return False
    A: str = [
        2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1,
        7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3, 1_2_7, 1_3_1, 1_3_7, 1_3_9,
        1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3, 1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1,
        2_2_3, 2_2_7, 2_2_9, 2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1,
        2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9, 3_5_3, 3_5_9, 3_6_7,
        3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9, 4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3,
        4_4_9, 4_5_7, 4_6_1, 4_6_3, 4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3,
        5_4_1, 5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1, 6_0_7, 6_1_3,
        6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9, 6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1,
        7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3, 7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7,
        7_9_7, 8_0_9, 8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3, 8_7_7,
        8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1, 9_4_7, 9_5_3, 9_6_7, 9_7_1,
        9_7_7, 9_8_3, 9_9_1, 9_9_7,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(__lowercase )


def SCREAMING_SNAKE_CASE( __lowercase = 1_0_2_4 ) -> int:
    while True:
        A: int = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(__lowercase ):
            return num


if __name__ == "__main__":
    UpperCamelCase = generate_large_prime()
    print(('''Prime number:''', num))
    print(('''is_prime_low_num:''', is_prime_low_num(num)))
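# A standalone check of the decomposition the Miller-Rabin loop above starts
# from: num - 1 == (2 ** t) * s with s odd. The input 221 is illustrative.
def decompose(num):
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    return s, t


s, t = decompose(221)
assert (2**t) * s == 220 and s % 2 == 1  # 220 == 2**2 * 55
print((s, t))  # (55, 2)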
'''simple docstring'''
from __future__ import annotations

import numpy as np


def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict:
    return np.maximum(0 , __lowercase )


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

UpperCamelCase = {
    '''configuration_clipseg''': [
        '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''CLIPSegConfig''',
        '''CLIPSegTextConfig''',
        '''CLIPSegVisionConfig''',
    ],
    '''processing_clipseg''': ['''CLIPSegProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CLIPSegModel''',
        '''CLIPSegPreTrainedModel''',
        '''CLIPSegTextModel''',
        '''CLIPSegVisionModel''',
        '''CLIPSegForImageSegmentation''',
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

UpperCamelCase = {
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''Speech2TextTokenizer''']

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''Speech2TextFeatureExtractor''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
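Both `__init__` samples in the record above follow the same lazy-import pattern. Below is a minimal sketch of that pattern with a hypothetical `configuration_foo` submodule and class name; only `_LazyModule` and the `TYPE_CHECKING` dance come from the samples (inside the library it is the relative `...utils` import seen above, here written absolutely):

from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

# Map submodule name -> public names it exports (hypothetical example).
_import_structure = {"configuration_foo": ["FooConfig"]}

if TYPE_CHECKING:
    # Type checkers see the real import; it never executes at runtime.
    from .configuration_foo import FooConfig
else:
    import sys

    # Replace this module with a proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

The proxy defers the heavy backend imports until something is actually touched, which is why the samples guard each backend behind `is_torch_available()`-style checks.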
'''simple docstring''' from __future__ import annotations from math import pow, sqrt def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> dict[str, float]: if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance == 0: return {"resistance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(__lowercase , 2 ) + pow(__lowercase , 2 ) )} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
334
'''simple docstring''' from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
334
1
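The function in the first sample of the record above solves |Z|^2 = R^2 + X^2 for whichever quantity is passed as 0. A quick standalone check of that identity (plain names; the sample's own function is only reachable through its obfuscated alias):

from math import sqrt

resistance, reactance = 3.0, 4.0                  # a 3-4-5 triangle makes the result obvious
impedance = sqrt(resistance**2 + reactance**2)    # |Z| = sqrt(R^2 + X^2)
assert impedance == 5.0
# and the inverse direction used by the resistance/reactance branches:
assert sqrt(impedance**2 - reactance**2) == resistance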
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate UpperCamelCase = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('''''', '''|''', '''|'''), datarow=DataRow('''''', '''|''', '''|'''), padding=1, with_header_hide=None, ) UpperCamelCase = [] UpperCamelCase = [] UpperCamelCase = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}} UpperCamelCase = [ { '''type''': '''header''', '''text''': { '''type''': '''plain_text''', '''text''': f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results', '''emoji''': True, }, } ] UpperCamelCase = 0 for log in Path().glob('''*.log'''): UpperCamelCase = 0 with open(log, '''r''') as f: for line in f: UpperCamelCase = json.loads(line) if line.get('''nodeid''', '''''') != "": UpperCamelCase = line['''nodeid'''] if line.get('''duration''', None) is not None: UpperCamelCase = f'{line["duration"]:.4f}' if line.get('''outcome''', '''''') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('''_''')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) UpperCamelCase = [] log.unlink() UpperCamelCase = '''''' UpperCamelCase = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" UpperCamelCase = [] UpperCamelCase = {} for test in failed_tests: UpperCamelCase = test[0].split('''::''') UpperCamelCase = data[0].split('''/''')[-1] if data[0] not in filesafailed: UpperCamelCase = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) UpperCamelCase = [test[0] for test in failed_table] UpperCamelCase = list(set(files)) # Count number of instances in failed_tests UpperCamelCase = [] for file in individual_files: table.append([file, len(filesafailed[file])]) UpperCamelCase = tabulate( table, headers=['''Test Location''', '''Num Failed'''], tablefmt=hf_table_format, stralign='''right''', ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: UpperCamelCase = '''Too many failed tests, please see the full report in the Action results.''' UpperCamelCase = len(err) + 10 UpperCamelCase = message[: 3000 - offset] + f'\n...\n```\n{err}' print(f'### {message}') else: UpperCamelCase = '''No failed tests! 🤗''' print(f'## {message}') payload.append(no_error_payload) if os.environ.get('''TEST_TYPE''', '''''') != "": from slack_sdk import WebClient UpperCamelCase = WebClient(token=os.environ['''SLACK_API_TOKEN''']) if message != "No failed tests! 🤗": UpperCamelCase = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': message, }, } payload.append(md_report) UpperCamelCase = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': '''*For more details:*''', }, '''accessory''': { '''type''': '''button''', '''text''': { '''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True, }, '''url''': f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) UpperCamelCase = { '''type''': '''context''', '''elements''': [ { '''type''': '''plain_text''', '''text''': f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}', } ], } payload.append(date_report) UpperCamelCase = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload) UpperCamelCase = response.data['''ts'''] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name UpperCamelCase = '''''' for i, row in enumerate(test_failures): if row[0] != test_class: UpperCamelCase = row[0] else: UpperCamelCase = '''''' UpperCamelCase = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```', }, } client.chat_postMessage( channel='''#accelerate-ci-daily''', thread_ts=ts, blocks=[payload], )
334
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' @register_to_config def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple: '''simple docstring''' super().__init__() A: Optional[Any] = sample_size # time if time_embedding_type == "fourier": A: Tuple = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) A: List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": A: str = Timesteps( block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) A: Any = block_out_channels[0] if use_timestep_embedding: A: Optional[Any] = block_out_channels[0] * 4 A: List[Any] = TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , ) A: Optional[Any] = nn.ModuleList([] ) A: str = None A: str = nn.ModuleList([] ) A: Tuple = None # down A: Any = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = output_channel A: List[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[int] = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid A: Union[str, Any] = get_mid_block( SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , ) # up A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) ) A: List[str] = reversed_block_out_channels[0] if out_block_type is None: A: int = out_channels else: A: Union[str, Any] = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: List[Any] = output_channel A: int = ( reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[Any] = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) A: Any = output_channel # out A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) A: Optional[int] = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]: '''simple docstring''' A: Any = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: A: List[str] = timesteps[None].to(sample.device ) A: int = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: A: str = timestep_embed[..., None] A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down A: List[str] = () for downsample_block in self.down_blocks: A , A: Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): A: List[Any] = down_block_res_samples[-1:] A: List[str] = down_block_res_samples[:-1] A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
334
1
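The Slack-report sample in the record above builds a custom `tabulate` table format. A small sketch of just that piece (`TableFormat` and `DataRow` are real `tabulate` namedtuples; the row data is made up):

from tabulate import DataRow, TableFormat, tabulate

hf_table_format = TableFormat(
    lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None,
    headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"),
    padding=1, with_header_hide=None,
)
print(tabulate([["tests/test_foo.py", 2]], headers=["Test Location", "Num Failed"],
               tablefmt=hf_table_format, stralign="right"))

This renders header and data rows as pipe-separated lines with no rule lines, matching the code-fenced tables the script posts to Slack.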
'''simple docstring''' import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Dict , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> None: '''simple docstring''' warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , ) super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
334
'''simple docstring''' import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : int , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Dict ) -> None: '''simple docstring''' warnings.warn( '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use SegformerImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , ) super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
334
1
'''simple docstring''' class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: '''simple docstring''' A: List[Any] = n A: Tuple = [None] * self.n A: Optional[Any] = 0 # index of the first element A: Optional[int] = 0 A: List[Any] = 0 def __len__( self : Tuple ) -> int: '''simple docstring''' return self.size def _snake_case ( self : Union[str, Any] ) -> bool: '''simple docstring''' return self.size == 0 def _snake_case ( self : str ) -> int: '''simple docstring''' return False if self.is_empty() else self.array[self.front] def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' if self.size >= self.n: raise Exception('''QUEUE IS FULL''' ) A: Optional[int] = data A: str = (self.rear + 1) % self.n self.size += 1 return self def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' if self.size == 0: raise Exception('''UNDERFLOW''' ) A: int = self.array[self.front] A: Union[str, Any] = None A: Tuple = (self.front + 1) % self.n self.size -= 1 return temp
334
'''simple docstring''' from collections import deque class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: '''simple docstring''' A: Union[str, Any] = process_name # process name A: List[str] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A: Dict = arrival_time A: Optional[Any] = burst_time # remaining burst time A: Any = 0 # total time of the process wait in ready queue A: Any = 0 # time from arrival time to completion time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None: '''simple docstring''' A: Dict = number_of_queues # time slice of queues that round robin algorithm applied A: int = time_slices # unfinished process is in this ready_queue A: Tuple = queue # current time A: int = current_time # finished process is in this sequence queue A: deque[Process] = deque() def _snake_case ( self : List[Any] ) -> list[str]: '''simple docstring''' A: str = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Optional[int] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Any = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: List[Any] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]: '''simple docstring''' return [q.burst_time for q in queue] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE_ ) != 0: A: Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A: Any = 0 # set the process's turnaround time because it is finished A: int = self.current_time - cp.arrival_time # set the completion time A: List[str] = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int ) -> tuple[deque[Process], deque[Process]]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE_ ) ): A: Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A: Optional[Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A: int = 0 # set the finish time A: Union[str, Any] = self.current_time # update the process' turnaround time because it is finished A: Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _snake_case ( self : Optional[Any] ) -> deque[Process]: '''simple docstring''' for i in range(self.number_of_queues - 1 ): A , A: Optional[Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0) UpperCamelCase = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( f'completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( f'sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}' )
334
1
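The first sample in the record above is a fixed-size circular queue, though obfuscation has collapsed all of its methods onto one name. A self-contained sketch with readable (hypothetical) names showing the same front/rear arithmetic:

class RingQueue:
    """Fixed-capacity FIFO backed by a plain list."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * n
        self.front = self.rear = self.size = 0

    def enqueue(self, item):
        if self.size >= self.n:
            raise OverflowError("queue is full")
        self.array[self.rear] = item
        self.rear = (self.rear + 1) % self.n   # wrap around
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise IndexError("queue is empty")
        item, self.array[self.front] = self.array[self.front], None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return item

q = RingQueue(2)
q.enqueue("a")
q.enqueue("b")
assert q.dequeue() == "a" and q.dequeue() == "b"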
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[float]]: A: list[list[float]] = [] for data in source_data: for i, el in enumerate(__lowercase ): if len(__lowercase ) < i + 1: data_lists.append([] ) data_lists[i].append(float(__lowercase ) ) return data_lists def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]: A: list[list[float]] = [] for dlist, weight in zip(__lowercase , __lowercase ): A: List[str] = min(__lowercase ) A: Union[str, Any] = max(__lowercase ) A: list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: A: List[str] = F"""Invalid weight of {weight:f} provided""" raise ValueError(__lowercase ) score_lists.append(__lowercase ) return score_lists def SCREAMING_SNAKE_CASE( __lowercase ) -> list[float]: A: list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(__lowercase ): A: str = final_scores[j] + ele return final_scores def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]: A: Any = get_data(__lowercase ) A: str = calculate_each_score(__lowercase , __lowercase ) A: int = generate_final_scores(__lowercase ) # append scores to source data for i, ele in enumerate(__lowercase ): source_data[i].append(__lowercase ) return source_data
334
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger() @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> int: '''simple docstring''' A: List[str] = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(SCREAMING_SNAKE_CASE_ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(SCREAMING_SNAKE_CASE_ ) [x.remove() for x in self.handles] return self @property def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : nn.Module UpperCamelCase_ : int = 0 UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]: '''simple docstring''' A: Dict = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized A: Tuple = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) ) A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise Exception( f"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while""" f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}.""" ) for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase = True ) -> Any: print(F"""Converting {name}...""" ) with torch.no_grad(): A: Union[str, Any] = timm.create_model(__lowercase , pretrained=__lowercase ).eval() A: List[str] = ResNetForImageClassification(__lowercase ).eval() A: int = ModuleTransfer(src=__lowercase , dest=__lowercase ) A: List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(__lowercase ) assert torch.allclose(from_model(__lowercase ) , our_model(__lowercase ).logits ), "The model logits don't match the original one." A: str = F"""resnet{'-'.join(name.split('resnet' ) )}""" print(__lowercase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__lowercase , ) # we can use the convnext one A: Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , ) print(F"""Pushed {checkpoint_name}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> List[Any]: A: Union[str, Any] = '''imagenet-1k-id2label.json''' A: Union[str, Any] = 1_0_0_0 A: Optional[int] = (1, num_labels) A: Dict = '''huggingface/label-files''' A: Any = num_labels A: Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Optional[int] = {int(__lowercase ): v for k, v in idalabel.items()} A: Optional[int] = idalabel A: List[str] = {v: k for k, v in idalabel.items()} A: str = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase ) A: Optional[Any] = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(__lowercase , names_to_config[model_name] , __lowercase , __lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__lowercase , __lowercase , __lowercase , __lowercase ) return config, expected_shape if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
334
1
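The first sample of the record above min-max-normalizes each column, flips the scale for weight 0 ("lower is better"), and sums per-row scores; its four functions share one obfuscated name, so here is an equivalent standalone sketch (the sample data is made up):

def score_column(values, weight):
    lo, hi = min(values), max(values)
    span = hi - lo
    if weight == 0:   # lower is better
        return [1 - (v - lo) / span if span else 1 for v in values]
    if weight == 1:   # higher is better
        return [(v - lo) / span if span else 0 for v in values]
    raise ValueError(f"Invalid weight of {weight:f} provided")

data = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
weights = [0, 0, 1]   # price, mileage: lower is better; year: higher is better
columns = list(zip(*data))
scores = [score_column(col, w) for col, w in zip(columns, weights)]
for row, total in zip(data, (sum(col) for col in zip(*scores))):
    row.append(total)   # mirrors the sample: final score appended to each row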
'''simple docstring''' import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments UpperCamelCase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[float] = field( default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} ) UpperCamelCase_ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """Whether to SortishSamler or not."""} ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} ) UpperCamelCase_ : bool = field(default=UpperCAmelCase_ , metadata={"""help""": """whether to use adafactor"""} ) UpperCamelCase_ : Optional[float] = field( default=UpperCAmelCase_ , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} ) UpperCamelCase_ : Optional[float] = field( default=UpperCAmelCase_ , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} ) UpperCamelCase_ : Optional[float] = field(default=UpperCAmelCase_ , metadata={"""help""": """Dropout probability. Goes into model.config."""} ) UpperCamelCase_ : Optional[float] = field( default=UpperCAmelCase_ , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} ) UpperCamelCase_ : Optional[str] = field( default="""linear""" , metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
334
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str | Literal[False]: A: List[str] = list(__lowercase ) A: Optional[Any] = list(__lowercase ) A: int = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count += 1 A: Optional[Any] = '''_''' if count > 1: return False else: return "".join(__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase ) -> list[str]: A: Any = [] while True: A: Dict = ['''$'''] * len(__lowercase ) A: Union[str, Any] = [] for i in range(len(__lowercase ) ): for j in range(i + 1 , len(__lowercase ) ): A: Any = compare_string(binary[i] , binary[j] ) if k is False: A: Any = '''*''' A: List[Any] = '''*''' temp.append('''X''' ) for i in range(len(__lowercase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__lowercase ) == 0: return pi A: List[Any] = list(set(__lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]: A: Optional[int] = [] for minterm in minterms: A: Optional[int] = '''''' for _ in range(__lowercase ): A: List[Any] = str(minterm % 2 ) + string minterm //= 2 temp.append(__lowercase ) return temp def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> bool: A: Union[str, Any] = list(__lowercase ) A: Union[str, Any] = list(__lowercase ) A: Optional[int] = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]: A: List[Any] = [] A: Dict = [0] * len(__lowercase ) for i in range(len(chart[0] ) ): A: List[str] = 0 A: str = -1 for j in range(len(__lowercase ) ): if chart[j][i] == 1: count += 1 A: Any = j if count == 1: A: Any = 1 for i in range(len(__lowercase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__lowercase ) ): A: Optional[int] = 0 temp.append(prime_implicants[i] ) while True: A: Dict = 0 A: Optional[int] = -1 A: Dict = 0 for i in range(len(__lowercase ) ): A: str = chart[i].count(1 ) if count_n > max_n: A: Tuple = count_n A: Optional[Any] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__lowercase ) ): A: Any = 0 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[int]]: A: str = [[0 for x in range(len(__lowercase ) )] for x in range(len(__lowercase ) )] for i in range(len(__lowercase ) ): A: Tuple = prime_implicants[i].count('''_''' ) for j in range(len(__lowercase ) ): if is_for_table(prime_implicants[i] , binary[j] , __lowercase ): A: Optional[Any] = 1 return chart def SCREAMING_SNAKE_CASE( ) -> None: A: int = int(input('''Enter the no. of variables\n''' ) ) A: Optional[int] = [ float(__lowercase ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] A: List[str] = decimal_to_binary(__lowercase , __lowercase ) A: str = check(__lowercase ) print('''Prime Implicants are:''' ) print(__lowercase ) A: List[Any] = prime_implicant_chart(__lowercase , __lowercase ) A: Any = selection(__lowercase , __lowercase ) print('''Essential Prime Implicants are:''' ) print(__lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
334
1
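The Quine-McCluskey sample in the record above pairs minterm strings that differ in exactly one bit and replaces that bit with '_'. The comparison step in isolation, under a hypothetical readable name:

def merge_if_one_bit_apart(a: str, b: str):
    """Return a with the single differing bit replaced by '_', else False."""
    diff = [i for i, (x, y) in enumerate(zip(a, b)) if x != y]
    if len(diff) != 1:
        return False
    i = diff[0]
    return a[:i] + "_" + a[i + 1:]

assert merge_if_one_bit_apart("0110", "0100") == "01_0"
assert merge_if_one_bit_apart("0110", "1001") is False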
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) A: Union[str, Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A: Dict = self.dummy_uncond_unet A: Any = ScoreSdeVeScheduler() A: Any = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) sde_ve.to(SCREAMING_SNAKE_CASE_ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) A: Any = torch.manual_seed(0 ) A: int = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE_ ).images A: List[str] = torch.manual_seed(0 ) A: List[Any] = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )[ 0 ] A: Union[str, Any] = image[0, -3:, -3:, -1] A: Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A: int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: Dict = '''google/ncsnpp-church-256''' A: Any = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) A: Dict = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ ) sde_ve.to(SCREAMING_SNAKE_CASE_ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = torch.manual_seed(0 ) A: Tuple = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE_ ).images A: Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) A: Optional[int] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
334
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: A: Tuple = len(__lowercase ) for i in range(length - 1 ): A: Dict = i for k in range(i + 1 , __lowercase ): if collection[k] < collection[least]: A: List[str] = k if least != i: A , A: Tuple = (collection[i], collection[least]) return collection if __name__ == "__main__": UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip() UpperCamelCase = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
334
1
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> str: return " ".join( ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in __lowercase.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('''Hey wollef sroirraw'''))
334
'''simple docstring''' class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: Tuple = None A: Dict = None A: Optional[int] = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: str = len(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = None def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> str: '''simple docstring''' if sources is int: A: Union[str, Any] = [sources] if sinks is int: A: Tuple = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return A: List[str] = sources[0] A: Optional[int] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: A: Any = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A: Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A: Optional[Any] = max_input_flow A: Optional[Any] = 0 A: str = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A: Optional[Any] = max_input_flow A: str = size - 1 def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: '''simple docstring''' A: Optional[Any] = algorithm(self ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]: '''simple docstring''' A: str = flow_network A: List[str] = flow_network.verticesCount A: Dict = flow_network.sourceIndex A: Any = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A: str = flow_network.graph A: str = False def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' if not self.executed: self._algorithm() A: str = True def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result A: Any = -1 def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )] A: Any = [0] * self.verticies_count A: Optional[Any] = [0] * self.verticies_count def _snake_case ( self : str ) -> Optional[Any]: '''simple docstring''' A: Any = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A: str = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A: Dict = 0 while i < len(SCREAMING_SNAKE_CASE_ ): A: Any = vertices_list[i] A: str = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) A: Tuple = 0 else: i += 1 A: Tuple = sum(self.preflow[self.source_index] ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int: '''simple docstring''' A: Optional[int] = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> int: '''simple docstring''' A: Optional[Any] = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A: List[Any] = self.heights[to_index] if min_height is not None: A: int = min_height + 1 if __name__ == "__main__": UpperCamelCase = [0] UpperCamelCase = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network UpperCamelCase = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate UpperCamelCase = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
334
1
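The word-reversal sample above flips only words longer than four characters; a one-line sanity check of that rule:

words = "Hey wollef sroirraw".split()
assert " ".join("".join(w[::-1]) if len(w) > 4 else w for w in words) == "Hey fellow warriors"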
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCamelCase = pytest.mark.integration UpperCamelCase = {'''comet'''} UpperCamelCase = importlib.util.find_spec('''fairseq''') is not None UpperCamelCase = {'''code_eval'''} UpperCamelCase = os.name == '''nt''' UpperCamelCase = {'''bertscore''', '''frugalscore''', '''perplexity'''} UpperCamelCase = importlib.util.find_spec('''transformers''') is not None def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: @wraps(__lowercase ) def wrapper(self , __lowercase ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self , __lowercase ) return wrapper def SCREAMING_SNAKE_CASE( __lowercase ) -> int: @wraps(__lowercase ) def wrapper(self , __lowercase ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self , __lowercase ) return wrapper def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: @wraps(__lowercase ) def wrapper(self , __lowercase ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self , __lowercase ) return wrapper def SCREAMING_SNAKE_CASE( ) -> Optional[Any]: A: Tuple = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) @local class lowerCAmelCase_ ( parameterized.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = {} UpperCamelCase_ : Optional[Any] = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: '''simple docstring''' A: Dict = '''[...]''' A: int = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE_ ) ).module_path ) A: Dict = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE_ ) # check parameters A: Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(SCREAMING_SNAKE_CASE_ , metric_module.__name__ ): with self.use_local_metrics(): try: A: str = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict ) -> Optional[Any]: '''simple docstring''' A: int = '''[...]''' A: Any = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE_ ) ).module_path ) # run doctest with self.use_local_metrics(): A: str = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> str: '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE_ ): yield else: yield @contextmanager def _snake_case ( self : List[str] ) -> Dict: '''simple docstring''' def load_local_metric(SCREAMING_SNAKE_CASE_ : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ): return load_metric(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE_ ) , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) with patch('''datasets.load_metric''' ) as mock_load_metric: A: Union[str, Any] = load_local_metric yield @classmethod def _snake_case ( cls : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: '''simple docstring''' def wrapper(SCREAMING_SNAKE_CASE_ : Optional[Any] ): A: Optional[Any] = contextmanager(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Any: import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : List[str] ) -> List[Any]: '''simple docstring''' assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: A: Union[str, Any] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict: import torch def bert_cos_score_idf(__lowercase , __lowercase , *__lowercase , **__lowercase ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__lowercase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: A: str = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: def load_from_checkpoint(__lowercase ): class lowerCAmelCase_ : '''simple docstring''' def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> List[Any]: '''simple docstring''' assert len(SCREAMING_SNAKE_CASE_ ) == 2 A: List[Any] = [0.19, 0.92] return scores, sum(SCREAMING_SNAKE_CASE_ ) / len(SCREAMING_SNAKE_CASE_ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: A: Optional[int] = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: A: Optional[Any] = load_from_checkpoint yield def SCREAMING_SNAKE_CASE( ) -> List[str]: A: int = load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) A: Union[str, Any] = '''ERROR''' A: Any = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(__lowercase , match=re.escape(__lowercase ) ): metric.compute(predictions=[] , references=[] , scheme=__lowercase )
334
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ["""input_features""", """attention_mask"""] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_60_00 , SCREAMING_SNAKE_CASE_ : int=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]: '''simple docstring''' super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = num_mel_bins A: str = do_ceptral_normalize A: int = normalize_means A: List[Any] = normalize_vars A: Any = True def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , ) -> np.ndarray: '''simple docstring''' A: Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A: Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) A: List[Any] = ta_kaldi.fbank(SCREAMING_SNAKE_CASE_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : float = 0.0 , ) -> np.ndarray: '''simple docstring''' if normalize_means: A: str = x[:input_length].mean(axis=0 ) A: Dict = np.subtract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if normalize_vars: A: Tuple = x[:input_length].std(axis=0 ) A: List[Any] = np.divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if input_length < x.shape[0]: A: Optional[int] = padding_value # make sure array is in float32 A: Optional[Any] = x.astype(np.floataa ) return x def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[np.ndarray] , SCREAMING_SNAKE_CASE_ : Optional[np.ndarray] = None ) -> List[np.ndarray]: '''simple docstring''' A: int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A: Any = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) A: Optional[Any] = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A: Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ): A: int = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A: Any = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A: Union[str, Any] = [raw_speech] # extract fbank features A: str = [self._extract_fbank_features(SCREAMING_SNAKE_CASE_ ) for waveform in raw_speech] # convert into correct format for padding A: int = BatchFeature({'''input_features''': features} ) A: int = self.pad( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) # make sure list is in array format A: List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE_ ): A: Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_features] A: List[Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A: Dict = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A: Dict = ( np.array(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD else None ) A: List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE_ ) if return_tensors is not None: A: Dict = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ ) return padded_inputs
334
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase = { '''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''], '''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''VisionTextDualEncoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''FlaxVisionTextDualEncoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''TFVisionTextDualEncoderModel'''] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
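# --- Added illustration: a minimal sketch of what the lazy _import_structure above
# buys, using the module-level __getattr__ hook (PEP 562) as it would live in a
# package's __init__.py. Simplified and hypothetical -- the real _LazyModule also
# wires up dir() and module specs -- but it shows the core idea: heavy submodules
# are imported only when one of their names is first accessed.
import importlib

_demo_import_structure = {
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}


def __getattr__(name):
    for submodule, names in _demo_import_structure.items():
        if name in names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")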
334
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = DebertaTokenizer UpperCamelCase_ : List[str] = True UpperCamelCase_ : int = DebertaTokenizerFast def _snake_case ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A: Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] A: int = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] A: Union[str, Any] = {'''unk_token''': '''[UNK]'''} A: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]: '''simple docstring''' A: Optional[int] = '''lower newer''' A: str = '''lower newer''' return input_text, output_text def _snake_case ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A: str = self.get_tokenizer() A: Any = '''lower newer''' A: Dict = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] A: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokens + [tokenizer.unk_token] A: int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> Any: '''simple docstring''' A: str = self.get_tokenizer() A: List[str] = tokenizer('''Hello''' , '''World''' ) A: Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Tuple ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) A: Any = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.encode( '''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , 
add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) A: int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: int = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: A: List[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) A: Dict = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] A: Dict = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) A: Any = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) for seq in encoding['''input_ids''']] # fmt: off A: Any = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on A: Optional[int] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , SCREAMING_SNAKE_CASE_ ) for expected, decoded in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
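# --- Added illustration: a quick, hedged usage sketch for the tokenizer exercised
# by the tests above. Assumes network access to download "microsoft/deberta-base";
# the printed values are indicative, not asserted.
from transformers import DebertaTokenizer


def demo_deberta_roundtrip():
    tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
    enc = tok("lower newer")
    print(enc["input_ids"])              # token ids, wrapped in special tokens
    print(tok.decode(enc["input_ids"]))  # decodes back to the special-token-wrapped text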
334
1
'''simple docstring'''
import re
import string

import numpy as np

import datasets


_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between
        0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
'''

_CITATION = '''
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    '''simple docstring'''

    def _snake_case ( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) ,
            reference_urls=[] ,
        )

    def _snake_case (
        self ,
        predictions ,
        references ,
        regexes_to_ignore=None ,
        ignore_case=False ,
        ignore_punctuation=False ,
        ignore_numbers=False ,
    ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '''''' , x ) for x in predictions] )
                references = np.array([re.sub(s , '''''' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )

        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )

        if ignore_numbers:
            repl_table = string.digits.maketrans('''''' , '''''' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )

        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 1_00}
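# --- Added illustration: the core of the metric above as a plain function, without
# the datasets plumbing. It mirrors the regex-stripping and case-folding steps; the
# function name is hypothetical.
import re

import numpy as np


def demo_exact_match_rate(predictions, references, regexes_to_ignore=(), ignore_case=False):
    predictions, references = np.asarray(predictions), np.asarray(references)
    for pattern in regexes_to_ignore:
        predictions = np.array([re.sub(pattern, "", x) for x in predictions])
        references = np.array([re.sub(pattern, "", x) for x in references])
    if ignore_case:
        predictions = np.char.lower(predictions)
        references = np.char.lower(references)
    return float(np.mean(predictions == references) * 100)


# demo_exact_match_rate(["cat?", "theater"], ["the cat", "theater"])  -> 50.0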
334
'''simple docstring'''
import requests

_NEWS_API = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''


def fetch_bbc_news(bbc_news_api_key) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
        print(F"""{i}.) {article['title']}""" )


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
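# --- Added illustration: the JSON shape the fetcher above expects, exercised with
# a fake payload so no network call or API key is needed. The structure mirrors
# NewsAPI's {"articles": [{"title": ...}, ...]} response; the titles are made up.
_fake_bbc_news_page = {"articles": [{"title": "Headline one"}, {"title": "Headline two"}]}
for i, article in enumerate(_fake_bbc_news_page["articles"], 1):
    print(F"""{i}.) {article['title']}""")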
334
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[Any] = XGLMConfig UpperCamelCase_ : str = {} UpperCamelCase_ : int = """gelu""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int=14 , SCREAMING_SNAKE_CASE_ : Any=7 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : str=99 , SCREAMING_SNAKE_CASE_ : Dict=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Tuple=37 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=5_12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = parent A: Optional[Any] = batch_size A: Optional[int] = seq_length A: Union[str, Any] = is_training A: str = use_input_mask A: int = use_labels A: List[str] = vocab_size A: Optional[int] = d_model A: int = num_hidden_layers A: Optional[Any] = num_attention_heads A: Any = ffn_dim A: Optional[int] = activation_function A: Optional[int] = activation_dropout A: Optional[int] = attention_dropout A: Tuple = max_position_embeddings A: Optional[int] = initializer_range A: List[Any] = None A: List[Any] = 0 A: Tuple = 2 A: Optional[Any] = 1 def _snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return XGLMConfig.from_pretrained('''facebook/xglm-564M''' ) def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) A: List[str] = None if self.use_input_mask: A: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A: List[Any] = self.get_config() A: str = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _snake_case ( self : Union[str, Any] ) -> str: '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE_ , ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: Any = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ): Union[str, Any] = config_and_inputs A: Optional[int] = { '''input_ids''': input_ids, '''head_mask''': 
head_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () UpperCamelCase_ : Any = (TFXGLMForCausalLM,) if is_tf_available() else () UpperCamelCase_ : Union[str, Any] = ( {"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {} ) UpperCamelCase_ : List[Any] = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Tuple = False def _snake_case ( self : Union[str, Any] ) -> int: '''simple docstring''' A: Dict = TFXGLMModelTester(self ) A: Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , n_embd=37 ) def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() @slow def _snake_case ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: Optional[int] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' ) def _snake_case ( self : str ) -> Optional[int]: '''simple docstring''' super().test_resize_token_embeddings() @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Dict=True ) -> str: '''simple docstring''' A: List[Any] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) A: Optional[int] = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off A: List[str] = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on A: List[Any] = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Union[str, Any] ) -> str: '''simple docstring''' A: str = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) A: List[str] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) tf.random.set_seed(0 ) A: Optional[int] = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' ) A: Union[str, Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(''':/CPU:0''' ): A: Any = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , seed=[7, 0] ) A: str = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Dict = ( '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' ) A: Any = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' ) A: List[str] = '''left''' # use different length sentences to test batching A: List[str] = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. 
The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When''', '''Hello, my dog is a little''', ] A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' , padding=SCREAMING_SNAKE_CASE_ ) A: Tuple = inputs['''input_ids'''] A: Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 ) A: Union[str, Any] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids A: str = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) A: Optional[int] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids A: List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 ) A: int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = [ '''This is an extremelly long sentence that only exists to test the ability of the model to cope with ''' '''left-padding, such as in batched generation. The output for the sequence below should be the same ''' '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ''' '''a single''', '''Hello, my dog is a little bit of a shy one, but he is very friendly''', ] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
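# --- Added illustration: why the batched-generation test above left-pads. For a
# decoder-only model new tokens are appended on the right, so left padding keeps
# each sequence's final real token adjacent to the generation position. Toy ids
# below are hypothetical, with 0 as the pad id.
left_padded = [[0, 0, 5, 6], [1, 2, 3, 4]]   # generation continues right after 6 and 4
right_padded = [[5, 6, 0, 0], [1, 2, 3, 4]]  # the short sequence would generate after pads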
334
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: UpperCamelCase = None UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } UpperCamelCase = { '''camembert-base''': 512, } UpperCamelCase = '''▁''' class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : int = CamembertTokenizer def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : int="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE_ : str="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : Any , ) -> Any: '''simple docstring''' A: Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Any = vocab_file A: Any = False if not self.vocab_file else True def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: List[str] = [self.cls_token_id] A: List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: List[str] = [self.sep_token_id] A: Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Dict = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
334
1
'''simple docstring''' import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Any=99 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : Dict=64 , SCREAMING_SNAKE_CASE_ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : int=5_12 , SCREAMING_SNAKE_CASE_ : Any=16 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Tuple=3 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : List[str]=4 , SCREAMING_SNAKE_CASE_ : Dict=1 , ) -> Tuple: '''simple docstring''' A: Optional[int] = parent A: Any = batch_size A: str = seq_length A: Optional[Any] = is_training A: Optional[Any] = use_input_mask A: Any = use_token_type_ids A: Optional[int] = use_labels A: List[Any] = vocab_size A: Optional[Any] = hidden_size A: Dict = num_hidden_layers A: Tuple = num_attention_heads A: List[Any] = intermediate_size A: str = hidden_act A: Union[str, Any] = hidden_dropout_prob A: List[str] = attention_probs_dropout_prob A: Any = max_position_embeddings A: Optional[Any] = type_vocab_size A: List[Any] = type_sequence_label_size A: str = initializer_range A: Optional[int] = num_labels A: List[str] = num_choices A: int = scope A: List[str] = q_groups A: int = k_groups A: int = v_groups A: str = post_attention_groups A: Optional[int] = intermediate_groups A: int = output_groups def _snake_case ( self : Tuple ) -> List[str]: '''simple docstring''' A: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A: Optional[Any] = None if self.use_input_mask: A: Any = random_attention_mask([self.batch_size, self.seq_length] ) A: List[Any] = None A: Tuple = None A: int = None if self.use_labels: A: str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A: Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) A: Any = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self : Any ) -> int: '''simple docstring''' return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , 
num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]: '''simple docstring''' A: Any = SqueezeBertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Tuple = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Any = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> str: '''simple docstring''' A: List[str] = SqueezeBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Optional[int] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int ) -> Any: '''simple docstring''' A: str = SqueezeBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Dict = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]: '''simple docstring''' A: Optional[int] = self.num_labels A: Dict = SqueezeBertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = self.num_labels A: Optional[int] = 
SqueezeBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Optional[int] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ) -> Optional[Any]: '''simple docstring''' A: Dict = self.num_choices A: Optional[int] = SqueezeBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A: Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A: Any = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self : Union[str, Any] ) -> Any: '''simple docstring''' A: Any = self.prepare_config_and_inputs() ((A) , (A) , (A) , (A) , (A) , (A)): List[Any] = config_and_inputs A: Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Dict = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCamelCase_ : str = ( { """feature-extraction""": SqueezeBertModel, """fill-mask""": SqueezeBertForMaskedLM, """question-answering""": SqueezeBertForQuestionAnswering, """text-classification""": SqueezeBertForSequenceClassification, """token-classification""": SqueezeBertForTokenClassification, """zero-shot""": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase_ : Any = False UpperCamelCase_ : str = True UpperCamelCase_ : Optional[Any] = False def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' A: str = SqueezeBertModelTester(self ) A: Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Any: '''simple docstring''' A: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> str: '''simple docstring''' A: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Dict: '''simple docstring''' A: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Optional[int]: '''simple docstring''' A: Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Optional[int] ) -> List[Any]: '''simple docstring''' for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: str = SqueezeBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: Optional[int] = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) A: Optional[int] = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] ) A: Optional[Any] = model(SCREAMING_SNAKE_CASE_ )[0] A: List[Any] = torch.Size((1, 3) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) A: List[Any] = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
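# --- Added illustration: the unsqueeze/expand trick used by the multiple-choice
# check above, which tiles (batch, seq) inputs to (batch, num_choices, seq) so
# every choice shares the same tokens. Shapes are illustrative.
import torch

ids = torch.arange(6).reshape(2, 3)                       # (batch=2, seq=3)
tiled = ids.unsqueeze(1).expand(-1, 4, -1).contiguous()   # (batch=2, num_choices=4, seq=3)
assert tiled.shape == (2, 4, 3)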
334
'''simple docstring'''
import os
from distutils.util import strtobool


def SCREAMING_SNAKE_CASE( env_keys , default ):
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default


def SCREAMING_SNAKE_CASE( key , default=False ):
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...


def SCREAMING_SNAKE_CASE( key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
334
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() UpperCamelCase = logging.get_logger('''transformers.models.encodec''') UpperCamelCase = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } UpperCamelCase = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } UpperCamelCase = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': 
'''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } UpperCamelCase = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } UpperCamelCase = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } UpperCamelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } UpperCamelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } UpperCamelCase = [] UpperCamelCase = [] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict: for attribute in key.split('''.''' ): A: Union[str, Any] = getattr(__lowercase , __lowercase ) if weight_type is not None: A: Tuple = getattr(__lowercase , __lowercase ).shape else: A: str = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": A: Dict = value elif weight_type == "weight_g": A: Tuple = value elif weight_type == "weight_v": A: Any = value elif weight_type == "bias": A: str = value elif weight_type == "running_mean": A: List[Any] = value elif weight_type == "running_var": A: Dict = value elif weight_type == "num_batches_tracked": A: List[str] = value elif weight_type == "weight_ih_l0": A: Dict = value elif weight_type == "weight_hh_l0": A: Optional[int] = value elif weight_type == "bias_ih_l0": A: List[Any] = value elif weight_type == "bias_hh_l0": A: str = value elif weight_type == "weight_ih_l1": A: Optional[int] = value elif weight_type == "weight_hh_l1": A: int = value elif weight_type == "bias_ih_l1": A: Optional[Any] = value elif weight_type == "bias_hh_l1": A: str = value else: A: Optional[int] = value logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: A , A: Any = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: A: Any = [] if model_name == "encodec_24khz" or "encodec_32khz": A: List[str] = MAPPING_24K elif model_name == "encodec_48khz": A: List[Any] = MAPPING_48K else: raise ValueError(F"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(__lowercase , __lowercase ): logger.info(F"""{name} was ignored""" ) continue A: Optional[int] = False for key, mapped_key in MAPPING.items(): if "*" in key: A , A: Optional[int] = key.split('''.*.''' ) if prefix in name and suffix in name: A: str = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue A: Optional[Any] = True if "*" in mapped_key: A: Any = name.split(__lowercase )[0].split('''.''' )[-2] A: Tuple = mapped_key.replace('''*''' , __lowercase ) if "weight_g" in name: A: str = '''weight_g''' elif "weight_v" in name: A: List[Any] = '''weight_v''' elif "weight_ih_l0" in name: A: Dict = '''weight_ih_l0''' elif "weight_hh_l0" in name: A: int = '''weight_hh_l0''' elif "bias_ih_l0" in name: A: Union[str, Any] = '''bias_ih_l0''' elif "bias_hh_l0" in name: A: Tuple = '''bias_hh_l0''' elif "weight_ih_l1" in name: A: int = '''weight_ih_l1''' elif "weight_hh_l1" in name: A: Optional[Any] = '''weight_hh_l1''' elif "bias_ih_l1" in name: A: Dict = '''bias_ih_l1''' elif "bias_hh_l1" in name: A: str = '''bias_hh_l1''' elif "bias" in name: A: Union[str, Any] = '''bias''' elif "weight" in name: A: Dict = '''weight''' elif "running_mean" in name: A: Tuple = '''running_mean''' elif "running_var" in name: A: Any = '''running_var''' elif "num_batches_tracked" in name: A: str = '''num_batches_tracked''' else: A: Tuple = None set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) continue if not is_used: unused_weights.append(__lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ) -> Dict: if config_path is not None: A: Tuple = 
EncodecConfig.from_pretrained(__lowercase ) else: A: Union[str, Any] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A: Union[str, Any] = [8, 5, 4, 4] A: Dict = [2.2] A: List[Any] = 6_4 A: Optional[Any] = 3_2_0_0_0 A: List[Any] = 2_0_4_8 A: Optional[Any] = False A: int = False A: Union[str, Any] = False elif model_name == "encodec_48khz": A: Optional[int] = [8, 5, 4, 2] A: List[Any] = [3.0, 6.0, 1_2.0, 2_4.0] A: List[Any] = 4_8_0_0_0 A: int = 2 A: List[Any] = False A: Any = '''time_group_norm''' A: Optional[Any] = True A: Any = 1.0 A: Any = 0.0_1 else: raise ValueError(F"""Unknown model name: {model_name}""" ) A: str = EncodecModel(__lowercase ) A: Optional[Any] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(__lowercase ) A: Union[str, Any] = torch.load(__lowercase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A: Optional[int] = original_checkpoint['''best_state'''] recursively_load_weights(__lowercase , __lowercase , __lowercase ) model.save_pretrained(__lowercase ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(__lowercase ) model.push_to_hub(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
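# --- Added illustration: a minimal version of the ".*." wildcard renaming applied
# by recursively_load_weights above. The helper name is hypothetical and the index
# extraction is simplified relative to the original.
from typing import Optional


def demo_rename(name: str, key: str, mapped_key: str) -> Optional[str]:
    prefix, suffix = key.split(".*.")
    if name.startswith(prefix) and name.endswith(suffix):
        layer_index = name[len(prefix) + 1 : -len(suffix) - 1]  # text matched by "*"
        return mapped_key.replace("*", layer_index)
    return None


# demo_rename("quantizer.vq.layers.0._codebook.embed",
#             "quantizer.vq.layers.*._codebook.embed",
#             "quantizer.layers.*.codebook.embed")
# -> "quantizer.layers.0.codebook.embed"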
334
1
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} UpperCamelCase = { '''vocab_file''': { '''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''', '''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''', }, } UpperCamelCase = { '''facebook/esm2_t6_8M_UR50D''': 1024, '''facebook/esm2_t12_35M_UR50D''': 1024, } def SCREAMING_SNAKE_CASE( __lowercase ) -> List[str]: with open(__lowercase , '''r''' ) as f: A: Any = f.read().splitlines() return [l.strip() for l in lines] class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES UpperCamelCase_ : str = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<cls>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<pad>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<mask>" , SCREAMING_SNAKE_CASE_ : Any="<eos>" , **SCREAMING_SNAKE_CASE_ : Any , ) -> Optional[Any]: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = load_vocab_file(SCREAMING_SNAKE_CASE_ ) A: int = dict(enumerate(self.all_tokens ) ) A: Optional[int] = {tok: ind for ind, tok in enumerate(self.all_tokens )} A: Dict = unk_token A: Optional[int] = cls_token A: List[Any] = pad_token A: str = mask_token A: Dict = eos_token A: Tuple = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str: '''simple docstring''' return self._id_to_token.get(SCREAMING_SNAKE_CASE_ , self.unk_token ) def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> int: '''simple docstring''' return self._token_to_id.get(SCREAMING_SNAKE_CASE_ , self._token_to_id.get(self.unk_token ) ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> int: '''simple docstring''' return text.split() def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=False ) -> Any: '''simple docstring''' return len(self._id_to_token ) def _snake_case ( self : List[Any] ) -> List[str]: '''simple docstring''' return {token: i for i, token in enumerate(self.all_tokens )} def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : str ) -> int: '''simple docstring''' return self._token_to_id.get(SCREAMING_SNAKE_CASE_ , self._token_to_id.get(self.unk_token ) ) def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> str: '''simple docstring''' return self._id_to_token.get(SCREAMING_SNAKE_CASE_ , self.unk_token ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: str = [self.cls_token_id] A: Dict = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not 
set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List , SCREAMING_SNAKE_CASE_ : Optional[List] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] A: Any = [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] if token_ids_a is not None: mask += [0] * len(SCREAMING_SNAKE_CASE_ ) + [1] return mask def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any ) -> Any: '''simple docstring''' A: Any = os.path.join(SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def _snake_case ( self : Optional[int] ) -> int: '''simple docstring''' return self.get_vocab_size(with_added_tokens=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[List[str], List[AddedToken]] , SCREAMING_SNAKE_CASE_ : bool = False ) -> int: '''simple docstring''' return super()._add_tokens(SCREAMING_SNAKE_CASE_ , special_tokens=SCREAMING_SNAKE_CASE_ )
334
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
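The ESM tokenizer above has no <sep> token, so it frames a single sequence as <cls> … <eos> and reuses <eos> as the separator for pairs. A minimal standalone sketch of that layout (not the transformers API; the ids cls=0 and eos=2 are hypothetical placeholders):

def esm_special_token_layout(ids_a, ids_b=None, cls_id=0, eos_id=2):
    # Single sequence: <cls> tokens <eos>; pairs append the second sequence
    # followed by another <eos>, mirroring build_inputs_with_special_tokens.
    if ids_b is None:
        return [cls_id] + ids_a + [eos_id]
    return [cls_id] + ids_a + [eos_id] + ids_b + [eos_id]

assert esm_special_token_layout([5, 6]) == [0, 5, 6, 2]
assert esm_special_token_layout([5], [7]) == [0, 5, 2, 7, 2]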
'''simple docstring''' from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : str , SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Optional[Any]: '''simple docstring''' A: Optional[int] = parent A: Union[str, Any] = 13 A: Any = 7 A: Optional[int] = True A: Optional[int] = True A: Optional[int] = True A: Optional[int] = 99 A: Optional[Any] = 32 A: Optional[Any] = 2 A: List[Any] = 4 A: Any = 37 A: Dict = '''gelu''' A: int = 0.1 A: str = 0.1 A: List[Any] = 5_12 A: Tuple = 16 A: Any = 2 A: str = 0.02 A: Optional[int] = 3 A: Union[str, Any] = 4 A: List[Any] = None def _snake_case ( self : Tuple ) -> List[str]: '''simple docstring''' A: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A: List[Any] = None if self.use_input_mask: A: Any = random_attention_mask([self.batch_size, self.seq_length] ) A: Optional[int] = None A: Optional[Any] = None A: Tuple = None if self.use_labels: A: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A: List[str] = ids_tensor([self.batch_size] , self.num_choices ) A: Any = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self : str ) -> Tuple: '''simple docstring''' ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ): Optional[int] = self.prepare_config_and_inputs() A: Tuple = True A: Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = TFEsmModel(config=SCREAMING_SNAKE_CASE_ ) A: List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} A: Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = [input_ids, input_mask] A: List[str] = model(SCREAMING_SNAKE_CASE_ ) A: int = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[int]: '''simple docstring''' A: Tuple = True A: Tuple = TFEsmModel(config=SCREAMING_SNAKE_CASE_ ) A: Tuple = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''encoder_hidden_states''': encoder_hidden_states, '''encoder_attention_mask''': encoder_attention_mask, } A: Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = [input_ids, input_mask] A: List[Any] = model(SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ ) # Also check the case where encoder outputs are not passed A: Optional[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A: int = TFEsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) A: List[str] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int: '''simple docstring''' A: Tuple = self.num_labels A: List[Any] = TFEsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) A: List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} A: Dict = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: Union[str, Any] = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ): Tuple = config_and_inputs A: Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) UpperCamelCase_ : Dict = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Any = False def _snake_case ( self : Optional[Any] ) -> Dict: '''simple docstring''' A: Optional[int] = TFEsmModelTester(self ) A: Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def _snake_case ( self : int ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A: Tuple = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Dict: '''simple docstring''' A: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Any ) -> Any: '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: Union[str, Any] = TFEsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Protein models do not support embedding resizing.''' ) def _snake_case ( self : str ) -> Tuple: '''simple docstring''' pass @unittest.skip('''Protein models do not support embedding resizing.''' ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' pass def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' A , A: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A: str = model_class(SCREAMING_SNAKE_CASE_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer A: Union[str, Any] = model.get_bias() assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for k, v in name.items(): assert isinstance(SCREAMING_SNAKE_CASE_ , tf.Variable ) else: A: Optional[Any] = model.get_output_embeddings() assert x is None A: Any = model.get_bias() assert name is None @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : Tuple ) -> int: '''simple docstring''' A: List[Any] = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) A: Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) A: Optional[int] = model(SCREAMING_SNAKE_CASE_ )[0] A: int = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , SCREAMING_SNAKE_CASE_ ) # compare the actual values for a slice. A: Union[str, Any] = tf.constant( [ [ [8.92_1518, -10.58_9814, -6.467_1307], [-6.396_7156, -13.91_1377, -1.121_1915], [-7.78_1247, -13.95_1557, -3.74_0592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def _snake_case ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) A: Optional[int] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A: int = model(SCREAMING_SNAKE_CASE_ )[0] # compare the actual values for a slice. A: List[str] = tf.constant( [ [ [0.1444_3092, 0.5412_5327, 0.324_7739], [0.3034_0484, 0.0052_6676, 0.3107_7722], [0.3227_8043, -0.2498_7096, 0.341_4628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
334
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[float]]: A: list[list[float]] = [] for data in source_data: for i, el in enumerate(__lowercase ): if len(__lowercase ) < i + 1: data_lists.append([] ) data_lists[i].append(float(__lowercase ) ) return data_lists def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]: A: list[list[float]] = [] for dlist, weight in zip(__lowercase , __lowercase ): A: List[str] = min(__lowercase ) A: Union[str, Any] = max(__lowercase ) A: list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: A: List[str] = F"""Invalid weight of {weight:f} provided""" raise ValueError(__lowercase ) score_lists.append(__lowercase ) return score_lists def SCREAMING_SNAKE_CASE( __lowercase ) -> list[float]: A: list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(__lowercase ): A: str = final_scores[j] + ele return final_scores def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]: A: Any = get_data(__lowercase ) A: str = calculate_each_score(__lowercase , __lowercase ) A: int = generate_final_scores(__lowercase ) # append scores to source data for i, ele in enumerate(__lowercase ): source_data[i].append(__lowercase ) return source_data
334
1
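The scoring helpers above min-max normalize each column and invert the score when the column's weight is 0, so that smaller raw values rank higher. A self-contained sketch of that normalization on made-up data:

def normalize(column, weight):
    # Min-max scale a column; weight 0 means "smaller is better", weight 1
    # means "larger is better", matching the two branches above.
    lo, hi = min(column), max(column)
    if hi == lo:
        # Degenerate column: the original maps this to 1 (weight 0) or 0 (weight 1).
        return [1.0 if weight == 0 else 0.0] * len(column)
    if weight == 0:
        return [1 - (v - lo) / (hi - lo) for v in column]
    return [(v - lo) / (hi - lo) for v in column]

prices = [10.0, 20.0, 30.0]  # weight 0: the cheapest option scores 1.0
ratings = [5.0, 4.0, 3.0]    # weight 1: the highest rating scores 1.0
totals = [p + r for p, r in zip(normalize(prices, 0), normalize(ratings, 1))]
assert totals == [2.0, 1.0, 0.0]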
'''simple docstring''' import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets UpperCamelCase = '''\ @inproceedings{lin-2004-rouge, title = "{ROUGE}: A Package for Automatic Evaluation of Summaries", author = "Lin, Chin-Yew", booktitle = "Text Summarization Branches Out", month = jul, year = "2004", address = "Barcelona, Spain", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W04-1013", pages = "74--81", } ''' UpperCamelCase = '''\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge ''' UpperCamelCase = ''' Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring, `"rougeL"`: Longest common subsequence based scoring. `"rougeLSum"`: rougeLsum splits text using `"\n"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric(\'rouge\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\'] >>> print(results["rouge1"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results["rouge1"].mid.fmeasure) 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _snake_case ( self : Optional[Any] ) -> str: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/ROUGE_(metric)''', '''https://github.com/google-research/google-research/tree/master/rouge''', ] , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=False ) -> List[str]: '''simple docstring''' if rouge_types is None: A: str = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum'''] A: Union[str, Any] = rouge_scorer.RougeScorer(rouge_types=SCREAMING_SNAKE_CASE_ , use_stemmer=SCREAMING_SNAKE_CASE_ ) if use_aggregator: A: Union[str, Any] = scoring.BootstrapAggregator() else: A: List[str] = [] for ref, pred in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: Tuple = scorer.score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if use_aggregator: aggregator.add_scores(SCREAMING_SNAKE_CASE_ ) else: scores.append(SCREAMING_SNAKE_CASE_ ) if use_aggregator: A: Dict = aggregator.aggregate() else: A: Tuple = {} for key in scores[0]: A: str = [score[key] for score in scores] return result
334
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES UpperCamelCase_ : Union[str, Any] = 
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = DPRContextEncoderTokenizer class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Dict = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Optional[int] = DPRQuestionEncoderTokenizer UpperCamelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) UpperCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) UpperCamelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ : '''simple docstring''' def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) elif titles is None or texts is None: A: Union[str, Any] = titles if texts is None else texts return super().__call__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Union[str, Any] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles] A: Optional[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts] A: str = len(SCREAMING_SNAKE_CASE_ ) A: List[Any] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages assert len(SCREAMING_SNAKE_CASE_ ) == len( SCREAMING_SNAKE_CASE_ ), f"""There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts.""" A: Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: Dict = super().__call__(SCREAMING_SNAKE_CASE_ , 
add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] } if return_attention_mask is not False: A: Union[str, Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) A: Optional[Any] = attention_mask return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : BatchEncoding , SCREAMING_SNAKE_CASE_ : DPRReaderOutput , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : int = 64 , SCREAMING_SNAKE_CASE_ : int = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Any = reader_input['''input_ids'''] A , A , A: str = reader_output[:3] A: str = len(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ ) A: List[DPRReaderOutput] = [] for doc_id in sorted_docs: A: List[str] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence A: Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A: Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: A: int = len(SCREAMING_SNAKE_CASE_ ) A: Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(SCREAMING_SNAKE_CASE_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Union[str, Any] = [] for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) A: Any = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ ) A: Dict = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A: int = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals 
): continue chosen_span_intervals.append((start_index, end_index) ) if len(SCREAMING_SNAKE_CASE_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Dict = READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : Optional[Any] = DPRReaderTokenizer
334
1
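The reader tokenizer above decodes answers by scoring every (start, end) candidate as the sum of its boundary logits, then greedily keeping the best candidates that are not nested inside (or around) an already-chosen span. A simplified standalone sketch of that selection, with made-up logits:

def best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scored = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scored.append(((s, s + length), s_score + e_score))
    scored.sort(key=lambda item: item[1], reverse=True)  # best score first
    chosen = []
    for (s, e), _ in scored:
        # Skip candidates that contain, or are contained in, a chosen span.
        if any(s <= ps <= pe <= e or ps <= s <= e <= pe for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]))  # [(1, 2), (0, 0)]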
'''simple docstring''' import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''tensor(bool)''': np.bool_, '''tensor(int8)''': np.inta, '''tensor(uint8)''': np.uinta, '''tensor(int16)''': np.intaa, '''tensor(uint16)''': np.uintaa, '''tensor(int32)''': np.intaa, '''tensor(uint32)''': np.uintaa, '''tensor(int64)''': np.intaa, '''tensor(uint64)''': np.uintaa, '''tensor(float16)''': np.floataa, '''tensor(float)''': np.floataa, '''tensor(double)''': np.floataa, } class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' ) A: int = model A: Union[str, Any] = kwargs.get('''model_save_dir''' , SCREAMING_SNAKE_CASE_ ) A: List[Any] = kwargs.get('''latest_model_name''' , SCREAMING_SNAKE_CASE_ ) def __call__( self : Any , **SCREAMING_SNAKE_CASE_ : str ) -> Any: '''simple docstring''' A: Optional[int] = {k: np.array(SCREAMING_SNAKE_CASE_ ) for k, v in kwargs.items()} return self.model.run(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Path] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ) -> Tuple: '''simple docstring''' if provider is None: logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' ) A: Optional[int] = '''CPUExecutionProvider''' return ort.InferenceSession(SCREAMING_SNAKE_CASE_ , providers=[provider] , sess_options=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Path] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str: '''simple docstring''' A: Dict = file_name if file_name is not None else ONNX_WEIGHTS_NAME A: Optional[int] = self.model_save_dir.joinpath(self.latest_model_name ) A: List[str] = Path(SCREAMING_SNAKE_CASE_ ).joinpath(SCREAMING_SNAKE_CASE_ ) try: shutil.copyfile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) A: Any = self.model_save_dir.joinpath(SCREAMING_SNAKE_CASE_ ) if src_path.exists(): A: Dict = Path(SCREAMING_SNAKE_CASE_ ).joinpath(SCREAMING_SNAKE_CASE_ ) try: shutil.copyfile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except shutil.SameFileError: pass def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : Dict , ) -> Tuple: '''simple docstring''' if os.path.isfile(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) # saving model weights/files self._save_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @classmethod def _snake_case ( cls : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Path] , SCREAMING_SNAKE_CASE_ : Optional[Union[bool, str, None]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, None]] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : 
Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional["ort.SessionOptions"] = None , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> Optional[Any]: '''simple docstring''' A: int = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(SCREAMING_SNAKE_CASE_ ): A: Optional[Any] = OnnxRuntimeModel.load_model( os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , provider=SCREAMING_SNAKE_CASE_ , sess_options=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = Path(SCREAMING_SNAKE_CASE_ ) # load model from hub else: # download model A: Optional[Any] = hf_hub_download( repo_id=SCREAMING_SNAKE_CASE_ , filename=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , ) A: Optional[Any] = Path(SCREAMING_SNAKE_CASE_ ).parent A: Optional[Any] = Path(SCREAMING_SNAKE_CASE_ ).name A: Tuple = OnnxRuntimeModel.load_model(SCREAMING_SNAKE_CASE_ , provider=SCREAMING_SNAKE_CASE_ , sess_options=SCREAMING_SNAKE_CASE_ ) return cls(model=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @classmethod def _snake_case ( cls : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Path] , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Dict: '''simple docstring''' A: int = None if len(str(SCREAMING_SNAKE_CASE_ ).split('''@''' ) ) == 2: A , A: int = model_id.split('''@''' ) return cls._from_pretrained( model_id=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
334
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


UpperCamelCase = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''GPTSw3Tokenizer''']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
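from_pretrained above lets a revision be pinned by appending it to the model id with "@". A tiny sketch of that parsing convention:

def split_model_id(model_id):
    # "user/model@v1.0" -> ("user/model", "v1.0"); plain ids pin no revision.
    parts = model_id.split("@")
    if len(parts) == 2:
        return parts[0], parts[1]
    return model_id, None

assert split_model_id("user/model@v1.0") == ("user/model", "v1.0")
assert split_model_id("user/model") == ("user/model", None)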
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = ''' Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "A red cartoon frog, 4k" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/frog.png" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save("red_frog.png") ``` ''' def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=8 ) -> str: A: Tuple = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A: Optional[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=5_1_2 , __lowercase=5_1_2 ) -> str: A: List[Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) A: List[str] = np.array(pil_image.convert('''RGB''' ) ) A: Any = arr.astype(np.floataa ) / 1_2_7.5 - 1 A: List[Any] = np.transpose(__lowercase , [2, 0, 1] ) A: Optional[Any] = torch.from_numpy(__lowercase ).unsqueeze(0 ) return image class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : UNetaDConditionModel , SCREAMING_SNAKE_CASE_ : DDPMScheduler , SCREAMING_SNAKE_CASE_ : VQModel , ) -> Tuple: '''simple docstring''' super().__init__() self.register_modules( unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , movq=SCREAMING_SNAKE_CASE_ , ) A: str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict: '''simple docstring''' A: int = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ ) A: Tuple = max(num_inference_steps - init_timestep , 0 ) A: str = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int=None ) -> Union[str, Any]: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, 
`PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE_ )}""" ) A: Dict = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) A: int = batch_size * num_images_per_prompt if image.shape[1] == 4: A: Any = image else: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: List[Any] = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(SCREAMING_SNAKE_CASE_ ) ] A: Tuple = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) else: A: Dict = self.movq.encode(SCREAMING_SNAKE_CASE_ ).latent_dist.sample(SCREAMING_SNAKE_CASE_ ) A: str = self.movq.config.scaling_factor * init_latents A: Dict = torch.cat([init_latents] , dim=0 ) A: List[str] = init_latents.shape A: Optional[int] = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) # get latents A: Dict = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = init_latents return latents def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=0 ) -> str: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) A: int = torch.device(f"""cuda:{gpu_id}""" ) A: int = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ) -> str: '''simple docstring''' if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) A: Optional[int] = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=SCREAMING_SNAKE_CASE_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A: Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: A , A: Any = cpu_offload_with_hook(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prev_module_hook=SCREAMING_SNAKE_CASE_ ) # We'll offload the last model manually. 
A: Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(SCREAMING_SNAKE_CASE_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(SCREAMING_SNAKE_CASE_ ) def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , SCREAMING_SNAKE_CASE_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE_ : int = 5_12 , SCREAMING_SNAKE_CASE_ : int = 5_12 , SCREAMING_SNAKE_CASE_ : int = 1_00 , SCREAMING_SNAKE_CASE_ : float = 4.0 , SCREAMING_SNAKE_CASE_ : float = 0.3 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Tuple: '''simple docstring''' A: Union[str, Any] = self._execution_device A: Tuple = guidance_scale > 1.0 if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: str = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) A: str = image_embeds.shape[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: str = torch.cat(SCREAMING_SNAKE_CASE_ , dim=0 ) if do_classifier_free_guidance: A: List[Any] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) A: Tuple = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) A: Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE_ ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: Tuple = [image] if not all(isinstance(SCREAMING_SNAKE_CASE_ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(SCREAMING_SNAKE_CASE_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) A: Tuple = torch.cat([prepare_image(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i in image] , dim=0 ) A: List[Any] = image.to(dtype=image_embeds.dtype , device=SCREAMING_SNAKE_CASE_ ) A: Tuple = self.movq.encode(SCREAMING_SNAKE_CASE_ )['''latents'''] A: Any = latents.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 ) self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ ) A , A: Dict = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt ) A , A: Optional[int] = downscale_height_and_width(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.movq_scale_factor ) A: Optional[int] = self.prepare_latents( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image_embeds.dtype , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ): # expand the latents if we are doing classifier free guidance A: List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A: Any = {'''image_embeds''': image_embeds} A: List[str] = self.unet( sample=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , added_cond_kwargs=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0] if do_classifier_free_guidance: A , A: str = noise_pred.split(latents.shape[1] , dim=1 ) A , A: Optional[Any] = noise_pred.chunk(2 ) A , A: List[Any] = variance_pred.chunk(2 ) A: Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A: List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A , A: int = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A: Tuple = self.scheduler.step( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , )[0] # post-processing A: Union[str, Any] = self.movq.decode(SCREAMING_SNAKE_CASE_ , force_not_quantize=SCREAMING_SNAKE_CASE_ )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: A: List[Any] = image * 0.5 + 0.5 A: List[str] = image.clamp(0 , 1 ) A: Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A: List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
334
'''simple docstring'''

from __future__ import annotations

from typing import Any


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    pass


class lowerCAmelCase_ :
    '''simple docstring'''

    def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> None:
        '''simple docstring'''
        A: Any = data
        A: Node | None = None

    def __iter__( self : Optional[int] ) -> List[str]:
        '''simple docstring'''
        A: List[str] = self
        A: Dict = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(SCREAMING_SNAKE_CASE_ )
            yield node.data
            A: str = node.next_node

    @property
    def _snake_case ( self : List[str] ) -> bool:
        '''simple docstring'''
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    UpperCamelCase = Node(1)
    UpperCamelCase = Node(2)
    UpperCamelCase = Node(3)
    UpperCamelCase = Node(4)
    print(root_node.has_loop) # False
    UpperCamelCase = root_node.next_node
    print(root_node.has_loop) # True

    UpperCamelCase = Node(5)
    UpperCamelCase = Node(6)
    UpperCamelCase = Node(5)
    UpperCamelCase = Node(6)
    print(root_node.has_loop) # False

    UpperCamelCase = Node(1)
    print(root_node.has_loop) # False
334
1
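The has_loop property above detects a cycle by keeping a visited list, which costs O(n) memory and O(n^2) comparisons. For contrast, a hedged sketch of Floyd's tortoise-and-hare detection, a different technique than the snippet's that runs in O(n) time and O(1) extra space on any node exposing a next_node attribute:

def has_loop_floyd(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # tortoise: one step
        fast = fast.next_node.next_node  # hare: two steps
        if slow is fast:                 # the two can only meet inside a cycle
            return True
    return False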
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : List[str] UpperCamelCase_ : Optional[str] = None # Automatically constructed UpperCamelCase_ : ClassVar[str] = "dict" UpperCamelCase_ : ClassVar[Any] = None UpperCamelCase_ : str = field(default="""Translation""" , init=UpperCAmelCase_ , repr=UpperCAmelCase_ ) def __call__( self : Dict ) -> List[Any]: '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def _snake_case ( self : Dict ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[List] = None UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : Optional[str] = None # Automatically constructed UpperCamelCase_ : ClassVar[str] = "dict" UpperCamelCase_ : ClassVar[Any] = None UpperCamelCase_ : str = field(default="""TranslationVariableLanguages""" , init=UpperCAmelCase_ , repr=UpperCAmelCase_ ) def _snake_case ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' A: int = sorted(set(self.languages ) ) if self.languages else None A: str = len(self.languages ) if self.languages else None def __call__( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]: '''simple docstring''' A: Tuple = set(self.languages ) if self.languages and set(SCREAMING_SNAKE_CASE_ ) - lang_set: raise ValueError( f"""Some languages in example ({', '.join(sorted(set(SCREAMING_SNAKE_CASE_ ) - lang_set ) )}) are not in valid set ({', '.join(SCREAMING_SNAKE_CASE_ )}).""" ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. A: str = [] for lang, text in translation_dict.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. A , A: Tuple = zip(*sorted(SCREAMING_SNAKE_CASE_ ) ) return {"language": languages, "translation": translations} def _snake_case ( self : Any ) -> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
334
'''simple docstring''' from __future__ import annotations def SCREAMING_SNAKE_CASE( __lowercase = 4 ) -> list[list[int]]: A: Tuple = abs(__lowercase ) or 4 return [[1 + x + y * row_size for x in range(__lowercase )] for y in range(__lowercase )] def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]: return reverse_row(transpose(__lowercase ) ) # OR.. transpose(reverse_column(matrix)) def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]: return reverse_row(reverse_column(__lowercase ) ) # OR.. reverse_column(reverse_row(matrix)) def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]: return reverse_column(transpose(__lowercase ) ) # OR.. transpose(reverse_row(matrix)) def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]: A: Union[str, Any] = [list(__lowercase ) for x in zip(*__lowercase )] return matrix def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]: A: Optional[int] = matrix[::-1] return matrix def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[int]]: A: Optional[Any] = [x[::-1] for x in matrix] return matrix def SCREAMING_SNAKE_CASE( __lowercase ) -> None: for i in matrix: print(*__lowercase ) if __name__ == "__main__": UpperCamelCase = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 90 counterclockwise:\n''') print_matrix(rotate_aa(matrix)) UpperCamelCase = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 180:\n''') print_matrix(rotate_aaa(matrix)) UpperCamelCase = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 270 counterclockwise:\n''') print_matrix(rotate_aaa(matrix))
334
1
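A minimal usage sketch for the TranslationVariableLanguages feature above (the example sentences are invented): one language maps to one-or-many translations, and encoding flattens the dict into two parallel tuples sorted by (language, text).

example = {"en": "the cat", "fr": ["le chat", "la chatte"]}
# TranslationVariableLanguages(languages=["en", "fr"]).encode_example(example)
# -> {"language": ("en", "fr", "fr"),
#     "translation": ("the cat", "la chatte", "le chat")}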
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @parameterized.expand([(None,), ('''foo.json''',)] ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Dict ) -> str: '''simple docstring''' A: Dict = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) A: List[Any] = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: Optional[Any] = AutoConfig.from_pretrained('''gpt2''' ) A: Optional[Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ ) A: Any = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def _snake_case ( self : Optional[Any] ) -> List[str]: '''simple docstring''' A: Tuple = GenerationConfig() A: Any = { '''max_new_tokens''': 10_24, '''foo''': '''bar''', } A: List[str] = copy.deepcopy(SCREAMING_SNAKE_CASE_ ) A: List[Any] = generation_config.update(**SCREAMING_SNAKE_CASE_ ) # update_kwargs was not modified (no side effects) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 10_24 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} ) def _snake_case ( self : int ) -> Any: '''simple docstring''' A: List[Any] = GenerationConfig() A: Tuple = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir: generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''' ) A: Union[str, Any] = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ ) assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' ) # no new kwargs should be initialized if from config def _snake_case ( self : str ) -> Any: '''simple docstring''' A: int = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ ) 
self.assertEqual(default_config.num_beams , 1 ) A: int = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Any = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @classmethod def _snake_case ( cls : Any ) -> Optional[int]: '''simple docstring''' A: Tuple = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE_ ) @classmethod def _snake_case ( cls : Optional[Any] ) -> Dict: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-generation-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' ) except HTTPError: pass def _snake_case ( self : Any ) -> Union[str, Any]: '''simple docstring''' A: Tuple = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token ) A: List[Any] = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) A: Optional[Any] = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Any ) -> int: '''simple docstring''' A: List[Any] = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token ) A: Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) A: List[str] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
334
'''simple docstring'''
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    # ReLU maps every negative entry to 0 and leaves non-negative entries unchanged.
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
334
1
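A short round trip with the GenerationConfig API that the tests above exercise (a sketch; the temporary-directory handling is just for illustration):

import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir, config_name="foo.json")
    loaded = GenerationConfig.from_pretrained(tmp_dir, config_name="foo.json")

assert loaded.temperature == 0.7  # explicitly-set values survive the round trip
assert loaded.top_k == 50  # unspecified values fall back to the defaults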
'''simple docstring''' import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu UpperCamelCase = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: UpperCamelCase = json.load(f) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: '''simple docstring''' return FSMTTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ) -> str: '''simple docstring''' A: str = FSMTForConditionalGeneration.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: '''simple docstring''' A: List[str] = f"""facebook/wmt19-{pair}""" A: Any = self.get_tokenizer(SCREAMING_SNAKE_CASE_ ) A: int = self.get_model(SCREAMING_SNAKE_CASE_ ) A: str = bleu_data[pair]['''src'''] A: str = bleu_data[pair]['''tgt'''] A: Dict = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ , padding='''longest''' ).to(SCREAMING_SNAKE_CASE_ ) A: Dict = model.generate( input_ids=batch.input_ids , num_beams=8 , ) A: int = tokenizer.batch_decode( SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = calculate_bleu(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print(SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(scores['''bleu'''] , SCREAMING_SNAKE_CASE_ )
334
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_speech_to_text'''] = [
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_speech_to_text'''] = [
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
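The BLEU test above reduces to this inference loop (a sketch; the model id follows the facebook/wmt19-{pair} pattern used by the test, and the input sentence is made up):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great."], return_tensors="pt")
outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))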
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
334
1
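The __init__ modules above all rely on the same lazy-import trick: the package replaces itself in sys.modules with a proxy that imports a submodule only when one of its symbols is first accessed. A stripped-down sketch of the idea (the real _LazyModule in transformers/diffusers has a richer signature):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        # Import the defining submodule on first access only.
        module = importlib.import_module(f".{self._symbol_to_module[symbol]}", self.__name__)
        return getattr(module, symbol)


# A package __init__ would then do:
#     sys.modules[__name__] = LazyModule(__name__, _import_structure)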
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' A: Any = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''num_heads''' ) ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any]=13 , SCREAMING_SNAKE_CASE_ : List[str]=64 , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[16, 48, 96] , SCREAMING_SNAKE_CASE_ : str=[1, 3, 6] , SCREAMING_SNAKE_CASE_ : Any=[1, 2, 10] , SCREAMING_SNAKE_CASE_ : Tuple=[7, 3, 3] , SCREAMING_SNAKE_CASE_ : Dict=[4, 2, 2] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[2, 1, 1] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=[2, 2, 2] , SCREAMING_SNAKE_CASE_ : Optional[int]=[False, False, True] , SCREAMING_SNAKE_CASE_ : str=[0.0, 0.0, 0.0] , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1E-12 , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , ) -> str: '''simple docstring''' A: Optional[int] = parent A: Any = batch_size A: Union[str, Any] = image_size A: int = patch_sizes A: Optional[int] = patch_stride A: Any = patch_padding A: int = is_training A: Union[str, Any] = use_labels A: Union[str, Any] = num_labels A: Optional[int] = num_channels A: List[Any] = embed_dim A: Optional[int] = num_heads A: int = stride_kv A: Tuple = depth A: Dict = cls_token A: Tuple = attention_drop_rate A: List[str] = initializer_range A: List[Any] = layer_norm_eps def _snake_case ( self : str ) -> List[Any]: '''simple docstring''' A: Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A: List[Any] = None if self.use_labels: A: str = ids_tensor([self.batch_size] , self.num_labels ) A: Any = self.get_config() return config, pixel_values, labels def _snake_case ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: '''simple docstring''' A: List[str] = CvtModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) 
model.eval() A: int = model(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = (self.image_size, self.image_size) A , A: Tuple = image_size[0], image_size[1] for i in range(len(self.depth ) ): A: Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) A: int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: List[str] = self.num_labels A: Dict = CvtForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Tuple = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Union[str, Any] ) -> str: '''simple docstring''' A: Dict = self.prepare_config_and_inputs() A , A , A: Union[str, Any] = config_and_inputs A: int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : int = (CvtModel, CvtForImageClassification) if is_torch_available() else () UpperCamelCase_ : Union[str, Any] = ( {"""feature-extraction""": CvtModel, """image-classification""": CvtForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : Dict = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : List[Any] = False UpperCamelCase_ : Dict = False UpperCamelCase_ : Any = False def _snake_case ( self : Optional[int] ) -> List[Any]: '''simple docstring''' A: Dict = CvtModelTester(self ) A: str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def _snake_case ( self : Optional[Any] ) -> Tuple: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self : Tuple ) -> Tuple: '''simple docstring''' return @unittest.skip(reason='''Cvt does not output attentions''' ) def _snake_case ( self : Dict ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def _snake_case ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def _snake_case ( self : int ) -> Any: '''simple docstring''' pass def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A , A: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A: Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ ) A: int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A: str = [*signature.parameters.keys()] A: Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Any: '''simple docstring''' A: 
Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> Dict: '''simple docstring''' def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ): A: List[str] = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): A: str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) A: Optional[Any] = outputs.hidden_states A: Dict = len(self.model_tester.depth ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) A , A: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A: List[Any] = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A: Dict = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' A: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _snake_case ( self : Optional[int] ) -> Any: '''simple docstring''' pass @slow def _snake_case ( self : Dict ) -> Dict: '''simple docstring''' for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: List[Any] = CvtModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE( ) -> List[Any]: A: Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self : Tuple ) -> str: '''simple docstring''' return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _snake_case ( self : str ) -> Any: '''simple docstring''' A: Optional[int] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE_ ) A: List[Any] = self.default_image_processor A: List[str] = prepare_img() A: Tuple = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): A: Tuple = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits A: Tuple = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) A: int = torch.tensor([0.9285, 0.9015, -0.3150] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
334
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' @register_to_config def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple: '''simple docstring''' super().__init__() A: Optional[Any] = sample_size # time if time_embedding_type == "fourier": A: Tuple = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) A: List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": A: str = Timesteps( block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) A: Any = block_out_channels[0] if use_timestep_embedding: A: Optional[Any] = block_out_channels[0] * 4 A: List[Any] = TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , ) A: Optional[Any] = nn.ModuleList([] ) A: str = None A: str = nn.ModuleList([] ) A: Tuple = None # down A: Any = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = output_channel A: List[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[int] = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid A: Union[str, Any] = get_mid_block( SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , ) # up A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) ) A: List[str] = reversed_block_out_channels[0] if out_block_type is None: A: int = out_channels else: A: Union[str, Any] = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: List[Any] = output_channel A: int = ( 
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[Any] = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) A: Any = output_channel # out A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) A: Optional[int] = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]: '''simple docstring''' A: Any = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: A: List[str] = timesteps[None].to(sample.device ) A: int = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: A: str = timestep_embed[..., None] A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down A: List[str] = () for downsample_block in self.down_blocks: A , A: Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): A: List[Any] = down_block_res_samples[-1:] A: List[str] = down_block_res_samples[:-1] A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
334
1
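The shape checks in the Cvt tester above are plain convolution arithmetic; a tiny helper makes the formula explicit:

from math import floor


def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # out = floor((in + 2 * padding - kernel) / stride) + 1
    return floor((size + 2 * padding - kernel) / stride) + 1


# First Cvt stage in the tester: 64x64 input, 7x7 patches, stride 4, padding 2.
assert conv_output_size(64, kernel=7, stride=4, padding=2) == 16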
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
334
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
334
'''simple docstring''' from collections import deque class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: '''simple docstring''' A: Union[str, Any] = process_name # process name A: List[str] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A: Dict = arrival_time A: Optional[Any] = burst_time # remaining burst time A: Any = 0 # total time of the process wait in ready queue A: Any = 0 # time from arrival time to completion time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None: '''simple docstring''' A: Dict = number_of_queues # time slice of queues that round robin algorithm applied A: int = time_slices # unfinished process is in this ready_queue A: Tuple = queue # current time A: int = current_time # finished process is in this sequence queue A: deque[Process] = deque() def _snake_case ( self : List[Any] ) -> list[str]: '''simple docstring''' A: str = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Optional[int] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Any = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: List[Any] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]: '''simple docstring''' return [q.burst_time for q in queue] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE_ ) != 0: A: Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A: Any = 0 # set the process's turnaround time because it is finished A: int = self.current_time - cp.arrival_time # set the completion time A: List[str] = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : 
int ) -> tuple[deque[Process], deque[Process]]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE_ ) ): A: Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A: Optional[Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A: int = 0 # set the finish time A: Union[str, Any] = self.current_time # update the process' turnaround time because it is finished A: Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _snake_case ( self : Optional[Any] ) -> deque[Process]: '''simple docstring''' for i in range(self.number_of_queues - 1 ): A , A: Optional[Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0) UpperCamelCase = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( f'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( f'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
334
1
'''simple docstring'''
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name='''malus_law''')
334
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger() @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> int: '''simple docstring''' A: List[str] = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(SCREAMING_SNAKE_CASE_ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(SCREAMING_SNAKE_CASE_ ) [x.remove() for x in self.handles] return self @property def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : nn.Module UpperCamelCase_ : int = 0 UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]: '''simple docstring''' A: Dict = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized A: Tuple = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) ) A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise Exception( f"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while""" f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}.""" ) for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase = True ) -> Any: print(F"""Converting {name}...""" ) with torch.no_grad(): A: Union[str, Any] = timm.create_model(__lowercase , pretrained=__lowercase ).eval() A: List[str] = ResNetForImageClassification(__lowercase ).eval() A: int = ModuleTransfer(src=__lowercase , dest=__lowercase ) A: List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(__lowercase ) assert torch.allclose(from_model(__lowercase ) , our_model(__lowercase ).logits ), "The model logits don't match the original one." 
A: str = F"""resnet{'-'.join(name.split('resnet' ) )}""" print(__lowercase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__lowercase , ) # we can use the convnext one A: Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , ) print(F"""Pushed {checkpoint_name}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> List[Any]: A: Union[str, Any] = '''imagenet-1k-id2label.json''' A: Union[str, Any] = 1_0_0_0 A: Optional[int] = (1, num_labels) A: Dict = '''huggingface/label-files''' A: Any = num_labels A: Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Optional[int] = {int(__lowercase ): v for k, v in idalabel.items()} A: Optional[int] = idalabel A: List[str] = {v: k for k, v in idalabel.items()} A: str = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase ) A: Optional[Any] = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(__lowercase , names_to_config[model_name] , __lowercase , __lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__lowercase , __lowercase , __lowercase , __lowercase ) return config, expected_shape if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
334
1
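A worked example of the Malus-law helper above: a polarizer at 60 degrees passes a quarter of the incident intensity, since cos^2(60 deg) = 0.25.

import math

initial_intensity = 100.0
angle = 60.0
print(initial_intensity * math.cos(math.radians(angle)) ** 2)  # ~25.0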
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=13 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]=99 , SCREAMING_SNAKE_CASE_ : Optional[int]=16 , SCREAMING_SNAKE_CASE_ : Tuple=36 , SCREAMING_SNAKE_CASE_ : int=6 , SCREAMING_SNAKE_CASE_ : List[str]=6 , SCREAMING_SNAKE_CASE_ : str=6 , SCREAMING_SNAKE_CASE_ : Optional[Any]=37 , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5_12 , SCREAMING_SNAKE_CASE_ : int=16 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Optional[int]=3 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ) -> Optional[Any]: '''simple docstring''' A: List[str] = parent A: List[Any] = batch_size A: Union[str, Any] = seq_length A: str = is_training A: str = use_input_mask A: int = use_token_type_ids A: Union[str, Any] = use_labels A: Tuple = vocab_size A: Optional[Any] = embedding_size A: List[Any] = hidden_size A: int = num_hidden_layers A: Optional[int] = num_hidden_groups A: int = num_attention_heads A: Dict = intermediate_size A: List[str] = hidden_act A: List[str] = hidden_dropout_prob A: Dict = attention_probs_dropout_prob A: Optional[int] = max_position_embeddings A: Union[str, Any] = type_vocab_size A: int = type_sequence_label_size A: List[Any] = initializer_range A: List[Any] = num_labels A: Any = num_choices A: List[Any] = scope def _snake_case ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' A: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A: Tuple = None if self.use_input_mask: A: Tuple = random_attention_mask([self.batch_size, self.seq_length] ) A: str = None if self.use_token_type_ids: A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A: Optional[int] = None A: List[str] = None A: List[Any] = None if self.use_labels: A: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A: int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A: Tuple = ids_tensor([self.batch_size] , self.num_choices ) A: Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any: '''simple docstring''' A: Optional[Any] = AlbertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Tuple = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) A: Dict = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict: '''simple docstring''' A: List[Any] = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: str = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , sentence_order_label=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: '''simple docstring''' A: Union[str, Any] = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[Any]: '''simple docstring''' A: int = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Union[str, Any] = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) 
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: '''simple docstring''' A: Union[str, Any] = self.num_labels A: int = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = self.num_labels A: Any = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A: Any = self.num_choices A: Optional[Any] = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A: List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A: Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A: List[Any] = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' A: Optional[int] = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ): Tuple = config_and_inputs A: Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : int = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) UpperCamelCase_ : Dict = ( { """feature-extraction""": AlbertModel, """fill-mask""": AlbertForMaskedLM, """question-answering""": AlbertForQuestionAnswering, """text-classification""": AlbertForSequenceClassification, """token-classification""": AlbertForTokenClassification, """zero-shot""": AlbertForSequenceClassification, } if 
is_torch_available() else {} ) UpperCamelCase_ : Union[str, Any] = True def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ) -> List[str]: '''simple docstring''' A: List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE_ ): A: List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def _snake_case ( self : List[Any] ) -> Tuple: '''simple docstring''' A: Optional[Any] = AlbertModelTester(self ) A: Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : str ) -> Dict: '''simple docstring''' A: int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' A: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> str: '''simple docstring''' A: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> int: '''simple docstring''' A: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A: Optional[int] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Any ) -> Any: '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A: List[Any] = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _snake_case ( self : List[Any] ) -> int: '''simple docstring''' A: Optional[Any] = AlbertModel.from_pretrained('''albert-base-v2''' ) A: Optional[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A: Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A: Optional[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] A: Any = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) A: Any = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , 
SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
334
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str | Literal[False]: A: List[str] = list(__lowercase ) A: Optional[Any] = list(__lowercase ) A: int = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count += 1 A: Optional[Any] = '''_''' if count > 1: return False else: return "".join(__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase ) -> list[str]: A: Any = [] while True: A: Dict = ['''$'''] * len(__lowercase ) A: Union[str, Any] = [] for i in range(len(__lowercase ) ): for j in range(i + 1 , len(__lowercase ) ): A: Any = compare_string(binary[i] , binary[j] ) if k is False: A: Any = '''*''' A: List[Any] = '''*''' temp.append('''X''' ) for i in range(len(__lowercase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__lowercase ) == 0: return pi A: List[Any] = list(set(__lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]: A: Optional[int] = [] for minterm in minterms: A: Optional[int] = '''''' for _ in range(__lowercase ): A: List[Any] = str(minterm % 2 ) + string minterm //= 2 temp.append(__lowercase ) return temp def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> bool: A: Union[str, Any] = list(__lowercase ) A: Union[str, Any] = list(__lowercase ) A: Optional[int] = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]: A: List[Any] = [] A: Dict = [0] * len(__lowercase ) for i in range(len(chart[0] ) ): A: List[str] = 0 A: str = -1 for j in range(len(__lowercase ) ): if chart[j][i] == 1: count += 1 A: Any = j if count == 1: A: Any = 1 for i in range(len(__lowercase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__lowercase ) ): A: Optional[int] = 0 temp.append(prime_implicants[i] ) while True: A: Dict = 0 A: Optional[int] = -1 A: Dict = 0 for i in range(len(__lowercase ) ): A: str = chart[i].count(1 ) if count_n > max_n: A: Tuple = count_n A: Optional[Any] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__lowercase ) ): A: Any = 0 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[int]]: A: str = [[0 for x in range(len(__lowercase ) )] for x in range(len(__lowercase ) )] for i in range(len(__lowercase ) ): A: Tuple = prime_implicants[i].count('''_''' ) for j in range(len(__lowercase ) ): if is_for_table(prime_implicants[i] , binary[j] , __lowercase ): A: Optional[Any] = 1 return chart def SCREAMING_SNAKE_CASE( ) -> None: A: int = int(input('''Enter the no. of variables\n''' ) ) A: Optional[int] = [ float(__lowercase ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] A: List[str] = decimal_to_binary(__lowercase , __lowercase ) A: str = check(__lowercase ) print('''Prime Implicants are:''' ) print(__lowercase ) A: List[Any] = prime_implicant_chart(__lowercase , __lowercase ) A: Any = selection(__lowercase , __lowercase ) print('''Essential Prime Implicants are:''' ) print(__lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
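# Illustrative aside (a standalone sketch, not part of the record above): the heart of
# the Quine-McCluskey pass above is `compare_string`, which merges two minterm strings
# that differ in exactly one bit into a single implicant with '_' at that bit. A minimal
# self-contained version of that step, with hypothetical names:
def merge_minterms(a: str, b: str) -> str | None:
    """Return the merged implicant if `a` and `b` differ in exactly one position."""
    diff_positions = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff_positions) != 1:
        return None  # not adjacent on the Boolean cube, so no merge is possible
    i = diff_positions[0]
    return a[:i] + "_" + a[i + 1 :]


assert merge_minterms("0110", "0111") == "011_"  # differ only in the last bit
assert merge_minterms("0110", "1001") is None    # differ in every bit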
334
1
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask UpperCamelCase = logging.getLogger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : int=-1 ) -> List[Any]: '''simple docstring''' A: Any = label_idx def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[Split, str] ) -> List[InputExample]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: Optional[int] = mode.value A: List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , f"""{mode}.txt""" ) A: List[Any] = 1 A: Union[str, Any] = [] with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as f: A: Any = [] A: Tuple = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) ) guid_index += 1 A: Tuple = [] A: Union[str, Any] = [] else: A: str = line.split(''' ''' ) words.append(splits[0] ) if len(SCREAMING_SNAKE_CASE_ ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) ) return examples def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : TextIO , SCREAMING_SNAKE_CASE_ : TextIO , SCREAMING_SNAKE_CASE_ : List ) -> Union[str, Any]: '''simple docstring''' A: Any = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(SCREAMING_SNAKE_CASE_ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: A: Tuple = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(SCREAMING_SNAKE_CASE_ ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> List[str]: '''simple docstring''' if path: with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: A: Union[str, Any] = f.read().splitlines() if "O" not in labels: A: List[Any] = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : List[str] ) -> Union[str, Any]: '''simple docstring''' super().__init__(label_idx=-2 ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[str]: '''simple docstring''' if path: with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: A: Any = f.read().splitlines() if "O" not in labels: A: Optional[int] = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[Split, str] ) -> List[InputExample]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: str = mode.value A: Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , f"""{mode}.txt""" ) A: 
Optional[Any] = 1 A: int = [] with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as f: for sentence in parse_incr(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = [] A: str = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) ) guid_index += 1 return examples def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : TextIO , SCREAMING_SNAKE_CASE_ : TextIO , SCREAMING_SNAKE_CASE_ : List ) -> int: '''simple docstring''' A: List[Any] = 0 for sentence in parse_incr(SCREAMING_SNAKE_CASE_ ): A: int = preds_list[example_id] A: str = '''''' for token in sentence: out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(SCREAMING_SNAKE_CASE_ ) example_id += 1 def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ) -> List[str]: '''simple docstring''' if path: with open(SCREAMING_SNAKE_CASE_ , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
334
'''simple docstring'''
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
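# Hedged usage note for the selection sort above: the sort works in place (the input
# list itself is reordered) and returns that same list; it always performs O(n^2)
# comparisons, even on already-sorted input. Expected behaviour:
#     example = [64, 25, 12, 22, 11]
#     assert selection_sort(example) == [11, 12, 22, 25, 64]
#     assert example == [11, 12, 22, 25, 64]   # mutated in place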
334
1
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
    print(jaccard_similarity(set_a, set_b))
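# Worked example for the Jaccard similarity above, using the module's own __main__ data:
# set_a = {'a', 'b', 'c', 'd', 'e'} and set_b = {'c', 'd', 'e', 'f', 'h', 'i'} share the
# intersection {'c', 'd', 'e'} (size 3) and have a union of size 8, so
# jaccard_similarity(set_a, set_b) == 3 / 8 == 0.375. With alternative_union=True the
# denominator is len(set_a) + len(set_b) == 11, giving 3 / 11 instead.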
334
'''simple docstring''' class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: Tuple = None A: Dict = None A: Optional[int] = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: str = len(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = None def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> str: '''simple docstring''' if sources is int: A: Union[str, Any] = [sources] if sinks is int: A: Tuple = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return A: List[str] = sources[0] A: Optional[int] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: A: Any = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A: Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A: Optional[Any] = max_input_flow A: Optional[Any] = 0 A: str = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A: Optional[Any] = max_input_flow A: str = size - 1 def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: '''simple docstring''' A: Optional[Any] = algorithm(self ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]: '''simple docstring''' A: str = flow_network A: List[str] = flow_network.verticesCount A: Dict = flow_network.sourceIndex A: Any = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A: str = flow_network.graph A: str = False def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' if not self.executed: self._algorithm() A: str = True def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result A: Any = -1 def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )] A: Any = [0] * self.verticies_count A: Optional[Any] = [0] * self.verticies_count def _snake_case ( self : str ) -> Optional[Any]: '''simple docstring''' A: Any = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] 
+= bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A: str = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A: Dict = 0 while i < len(SCREAMING_SNAKE_CASE_ ): A: Any = vertices_list[i] A: str = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) A: Tuple = 0 else: i += 1 A: Tuple = sum(self.preflow[self.source_index] ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int: '''simple docstring''' A: Optional[int] = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> int: '''simple docstring''' A: Optional[Any] = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A: List[Any] = self.heights[to_index] if min_height is not None: A: int = min_height + 1 if __name__ == "__main__": UpperCamelCase = [0] UpperCamelCase = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network UpperCamelCase = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate UpperCamelCase = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
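# Sanity check (a hand computation, not produced by running the code): in the example
# graph [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] with entrance 0 and
# exit 3, the only augmenting path is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so
# the bottleneck is min(7, 6, 8) == 6 and the script should print "maximum flow is 6".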
334
1
'''simple docstring'''
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default) -> int:
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False) -> bool:
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no") -> str:
    value = os.environ.get(key, str(default))
    return value
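# Hedged usage sketch for the helpers above (assuming the reconstructed names): the int
# helper returns the first environment variable that is set, and the flag helper accepts
# the usual strtobool spellings ("1", "true", "yes", "on", ...).
import os

os.environ["MY_DEBUG"] = "yes"  # hypothetical variable name, for illustration only
assert parse_flag_from_env("MY_DEBUG") is True
assert get_int_from_env(["UNSET_A", "UNSET_B"], default=4) == 4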
334
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ["""input_features""", """attention_mask"""] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_60_00 , SCREAMING_SNAKE_CASE_ : int=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]: '''simple docstring''' super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = num_mel_bins A: str = do_ceptral_normalize A: int = normalize_means A: List[Any] = normalize_vars A: Any = True def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , ) -> np.ndarray: '''simple docstring''' A: Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A: Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) A: List[Any] = ta_kaldi.fbank(SCREAMING_SNAKE_CASE_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : float = 0.0 , ) -> np.ndarray: '''simple docstring''' if normalize_means: A: str = x[:input_length].mean(axis=0 ) A: Dict = np.subtract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if normalize_vars: A: Tuple = x[:input_length].std(axis=0 ) A: List[Any] = np.divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if input_length < x.shape[0]: A: Optional[int] = padding_value # make sure array is in float32 A: Optional[Any] = x.astype(np.floataa ) return x def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[np.ndarray] , SCREAMING_SNAKE_CASE_ : Optional[np.ndarray] = None ) -> List[np.ndarray]: '''simple docstring''' A: int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" 
{self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A: Any = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) A: Optional[Any] = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A: Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ): A: int = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A: Any = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A: Union[str, Any] = [raw_speech] # extract fbank features A: str = [self._extract_fbank_features(SCREAMING_SNAKE_CASE_ ) for waveform in raw_speech] # convert into correct format for padding A: int = BatchFeature({'''input_features''': features} ) A: int = self.pad( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) # make sure list is in array format A: List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE_ ): A: Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_features] A: List[Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A: Dict = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A: Dict = ( np.array(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD else None ) A: List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE_ ) if return_tensors is not None: A: Dict = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ ) return padded_inputs
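# Hedged usage sketch (relies on `transformers` with torchaudio installed; shapes are
# assumptions read off the defaults above, not outputs from running this file): the
# extractor turns raw 16 kHz audio into log-mel fbank features plus an attention mask.
#
#     import numpy as np
#     from transformers import Speech2TextFeatureExtractor
#
#     extractor = Speech2TextFeatureExtractor()           # defaults: 80 mel bins, 16 kHz
#     audio = np.zeros(16_000, dtype=np.float32)          # one second of silence
#     batch = extractor(audio, sampling_rate=16_000, return_tensors="np")
#     # batch["input_features"] has shape (1, num_frames, 80)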
334
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) UpperCamelCase = { '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''], '''processing_speech_to_text''': ['''Speech2TextProcessor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''Speech2TextTokenizer'''] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''Speech2TextFeatureExtractor'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSpeech2TextForConditionalGeneration''', '''TFSpeech2TextModel''', '''TFSpeech2TextPreTrainedModel''', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Speech2TextForConditionalGeneration''', '''Speech2TextModel''', '''Speech2TextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = DebertaTokenizer UpperCamelCase_ : List[str] = True UpperCamelCase_ : int = DebertaTokenizerFast def _snake_case ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A: Optional[int] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] A: int = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] A: Union[str, Any] = {'''unk_token''': '''[UNK]'''} A: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]: '''simple docstring''' A: Optional[int] = '''lower newer''' A: str = '''lower newer''' return input_text, output_text def _snake_case ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A: str = self.get_tokenizer() A: Any = '''lower newer''' A: Dict = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] A: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokens + [tokenizer.unk_token] A: int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] ) -> Any: '''simple docstring''' A: str = self.get_tokenizer() A: List[str] = tokenizer('''Hello''' , '''World''' ) A: Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Tuple ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) A: Any = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.encode( '''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , 
add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) A: int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: int = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: A: List[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) A: Dict = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] A: Dict = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) A: Any = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) for seq in encoding['''input_ids''']] # fmt: off A: Any = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on A: Optional[int] = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , SCREAMING_SNAKE_CASE_ ) for expected, decoded in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
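# Hedged aside on the toy setup above: the test builds a tiny byte-level BPE vocabulary
# where "\u0120" is the GPT-2-style marker for a leading space, so "lower newer" splits
# into ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] and unknown symbols fall
# back to the configured '[UNK]' token. Against the real checkpoint the usual entry
# point is:
#
#     from transformers import DebertaTokenizer
#     tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#     ids = tokenizer("lower newer")["input_ids"]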
334
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : List[str] = """data2vec-text""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any=3_05_22 , SCREAMING_SNAKE_CASE_ : str=7_68 , SCREAMING_SNAKE_CASE_ : int=12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : Tuple=30_72 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=5_12 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Dict=1E-12 , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : Dict="absolute" , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> Any: '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: Dict = vocab_size A: List[str] = hidden_size A: Optional[int] = num_hidden_layers A: Optional[int] = num_attention_heads A: Optional[Any] = hidden_act A: Tuple = intermediate_size A: List[Any] = hidden_dropout_prob A: int = attention_probs_dropout_prob A: Tuple = max_position_embeddings A: List[str] = type_vocab_size A: int = initializer_range A: List[Any] = layer_norm_eps A: str = position_embedding_type A: Dict = use_cache A: Optional[int] = classifier_dropout class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' @property def _snake_case ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A: List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A: Any = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
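# Hedged usage sketch (standard transformers config API; the attribute values below are
# the defaults declared in __init__ above, not independently verified):
#
#     from transformers import Data2VecTextConfig
#     config = Data2VecTextConfig()
#     assert config.model_type == "data2vec-text"
#     assert config.hidden_size == 768 and config.num_hidden_layers == 12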
334
'''simple docstring'''
import requests

_NEWS_API = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''


def fetch_bbc_news(bbc_news_api_key) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''], 1):
        print(f"""{i}.) {article['title']}""")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
334
1
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''', } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Any = """blip_2_vision_model""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int]=14_08 , SCREAMING_SNAKE_CASE_ : Optional[Any]=61_44 , SCREAMING_SNAKE_CASE_ : str=39 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=16 , SCREAMING_SNAKE_CASE_ : Dict=2_24 , SCREAMING_SNAKE_CASE_ : Optional[int]=14 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Any=0.0_0001 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE_ : Tuple=1E-10 , SCREAMING_SNAKE_CASE_ : List[Any]=True , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> Tuple: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) A: Any = hidden_size A: Union[str, Any] = intermediate_size A: List[Any] = num_hidden_layers A: Any = num_attention_heads A: Optional[int] = patch_size A: int = image_size A: str = initializer_range A: Optional[Any] = attention_dropout A: List[Any] = layer_norm_eps A: Union[str, Any] = hidden_act A: Optional[Any] = qkv_bias @classmethod def _snake_case ( cls : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : Any ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ ) A , A: Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": A: Tuple = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Dict = """blip_2_qformer""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE_ : str=7_68 , SCREAMING_SNAKE_CASE_ : Tuple=12 , SCREAMING_SNAKE_CASE_ : Dict=12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=30_72 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : Any=5_12 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1E-12 , SCREAMING_SNAKE_CASE_ : str=0 , SCREAMING_SNAKE_CASE_ : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=14_08 , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: str = vocab_size A: Any = hidden_size A: str = num_hidden_layers A: Tuple = num_attention_heads A: int = hidden_act A: str = intermediate_size A: Dict = hidden_dropout_prob A: List[Any] = attention_probs_dropout_prob A: Dict = max_position_embeddings A: int = initializer_range A: Optional[int] = layer_norm_eps A: List[str] = position_embedding_type A: Optional[int] = cross_attention_frequency A: Optional[Any] = encoder_hidden_size @classmethod def _snake_case ( cls : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : str ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ ) A , A: List[str] = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('''model_type''' ) == "blip-2": A: Optional[Any] = config_dict['''qformer_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : str = """blip-2""" UpperCamelCase_ : List[str] = True def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Any=32 , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) if vision_config is None: A: Optional[Any] = {} logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' ) if qformer_config is None: A: Tuple = {} logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' ) if text_config is None: A: Optional[Any] = {} logger.info('''text_config is None. 
Initializing the text config with default values (`OPTConfig`).''' ) A: List[Any] = BlipaVisionConfig(**SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = BlipaQFormerConfig(**SCREAMING_SNAKE_CASE_ ) A: Tuple = text_config['''model_type'''] if '''model_type''' in text_config else '''opt''' A: Dict = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE_ ) A: List[Any] = self.text_config.tie_word_embeddings A: List[str] = self.text_config.is_encoder_decoder A: Any = num_query_tokens A: Optional[Any] = self.vision_config.hidden_size A: Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES A: int = 1.0 A: Dict = 0.02 @classmethod def _snake_case ( cls : Any , SCREAMING_SNAKE_CASE_ : BlipaVisionConfig , SCREAMING_SNAKE_CASE_ : BlipaQFormerConfig , SCREAMING_SNAKE_CASE_ : PretrainedConfig , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> List[str]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE_ , ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A: int = copy.deepcopy(self.__dict__ ) A: Any = self.vision_config.to_dict() A: List[str] = self.qformer_config.to_dict() A: int = self.text_config.to_dict() A: Union[str, Any] = self.__class__.model_type return output
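# Hedged usage sketch for composing the config above (class and method names follow the
# transformers Blip-2 API; treat the exact signature as an assumption):
#
#     from transformers import Blip2Config, Blip2VisionConfig, Blip2QFormerConfig, OPTConfig
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config=Blip2VisionConfig(),
#         qformer_config=Blip2QFormerConfig(),
#         text_config=OPTConfig(),
#     )
#     assert config.num_query_tokens == 32   # default declared in __init__ above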
334
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: UpperCamelCase = None UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } UpperCamelCase = { '''camembert-base''': 512, } UpperCamelCase = '''▁''' class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : int = CamembertTokenizer def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : int="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE_ : str="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : Any , ) -> Any: '''simple docstring''' A: Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Any = vocab_file A: Any = False if not self.vocab_file else True def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: List[str] = [self.cls_token_id] A: List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: List[str] = [self.sep_token_id] A: Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Dict = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
334
1
'''simple docstring'''
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        raise ValueError('''Input must be a positive integer''')
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
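# Worked example for the function above (the Liouville lambda function): 12 factors as
# 2 * 2 * 3, so prime_factors(12) has length 3; an odd count of prime factors counted
# with multiplicity yields -1, while e.g. 10 == 2 * 5 has an even count and yields 1.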
334
'''simple docstring'''
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default) -> int:
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False) -> bool:
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no") -> str:
    value = os.environ.get(key, str(default))
    return value
334
1
'''simple docstring'''
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    '''simple docstring'''

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('''Empty Queue''')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''')


class Node:
    '''simple docstring'''

    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
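# Hedged usage sketch for the fixed-capacity circular queue above (class name as
# reconstructed; behaviour traced from the methods, not from a test run):
#
#     queue = CircularQueueLinkedList(initial_capacity=2)
#     queue.enqueue("a")
#     queue.enqueue("b")     # queue is now full; a third enqueue would raise "Full Queue"
#     queue.dequeue()        # -> "a"; the freed node is reused by the next enqueue
#     queue.enqueue("c")
#     queue.dequeue()        # -> "b"
#     queue.dequeue()        # -> "c"; dequeueing again would raise "Empty Queue"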
334
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() UpperCamelCase = logging.get_logger('''transformers.models.encodec''') UpperCamelCase = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } UpperCamelCase = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } UpperCamelCase = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': 
'''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } UpperCamelCase = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } UpperCamelCase = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } UpperCamelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } UpperCamelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } UpperCamelCase = [] UpperCamelCase = [] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict: for attribute in key.split('''.''' ): A: Union[str, Any] = getattr(__lowercase , __lowercase ) if weight_type is not None: A: Tuple = getattr(__lowercase , __lowercase ).shape else: A: str = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": A: Dict = value elif weight_type == "weight_g": A: Tuple = value elif weight_type == "weight_v": A: Any = value elif weight_type == "bias": A: str = value elif weight_type == "running_mean": A: List[Any] = value elif weight_type == "running_var": A: Dict = value elif weight_type == "num_batches_tracked": A: List[str] = value elif weight_type == "weight_ih_l0": A: Dict = value elif weight_type == "weight_hh_l0": A: Optional[int] = value elif weight_type == "bias_ih_l0": A: List[Any] = value elif weight_type == "bias_hh_l0": A: str = value elif weight_type == "weight_ih_l1": A: Optional[int] = value elif weight_type == "weight_hh_l1": A: int = value elif weight_type == "bias_ih_l1": A: Optional[Any] = value elif weight_type == "bias_hh_l1": A: str = value else: A: Optional[int] = value logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: A , A: Any = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: A: Any = [] if model_name == "encodec_24khz" or "encodec_32khz": A: List[str] = MAPPING_24K elif model_name == "encodec_48khz": A: List[Any] = MAPPING_48K else: raise ValueError(F"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(__lowercase , __lowercase ): logger.info(F"""{name} was ignored""" ) continue A: Optional[int] = False for key, mapped_key in MAPPING.items(): if "*" in key: A , A: Optional[int] = key.split('''.*.''' ) if prefix in name and suffix in name: A: str = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue A: Optional[Any] = True if "*" in mapped_key: A: Any = name.split(__lowercase )[0].split('''.''' )[-2] A: Tuple = mapped_key.replace('''*''' , __lowercase ) if "weight_g" in name: A: str = '''weight_g''' elif "weight_v" in name: A: List[Any] = '''weight_v''' elif "weight_ih_l0" in name: A: Dict = '''weight_ih_l0''' elif "weight_hh_l0" in name: A: int = '''weight_hh_l0''' elif "bias_ih_l0" in name: A: Union[str, Any] = '''bias_ih_l0''' elif "bias_hh_l0" in name: A: Tuple = '''bias_hh_l0''' elif "weight_ih_l1" in name: A: int = '''weight_ih_l1''' elif "weight_hh_l1" in name: A: Optional[Any] = '''weight_hh_l1''' elif "bias_ih_l1" in name: A: Dict = '''bias_ih_l1''' elif "bias_hh_l1" in name: A: str = '''bias_hh_l1''' elif "bias" in name: A: Union[str, Any] = '''bias''' elif "weight" in name: A: Dict = '''weight''' elif "running_mean" in name: A: Tuple = '''running_mean''' elif "running_var" in name: A: Any = '''running_var''' elif "num_batches_tracked" in name: A: str = '''num_batches_tracked''' else: A: Tuple = None set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) continue if not is_used: unused_weights.append(__lowercase ) logger.warning(F"""Unused weights: {unused_weights}""" ) @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ) -> Dict: if config_path is not None: A: Tuple = 
EncodecConfig.from_pretrained(__lowercase ) else: A: Union[str, Any] = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A: Union[str, Any] = [8, 5, 4, 4] A: Dict = [2.2] A: List[Any] = 6_4 A: Optional[Any] = 3_2_0_0_0 A: List[Any] = 2_0_4_8 A: Optional[Any] = False A: int = False A: Union[str, Any] = False elif model_name == "encodec_48khz": A: Optional[int] = [8, 5, 4, 2] A: List[Any] = [3.0, 6.0, 1_2.0, 2_4.0] A: List[Any] = 4_8_0_0_0 A: int = 2 A: List[Any] = False A: Any = '''time_group_norm''' A: Optional[Any] = True A: Any = 1.0 A: Any = 0.0_1 else: raise ValueError(F"""Unknown model name: {model_name}""" ) A: str = EncodecModel(__lowercase ) A: Optional[Any] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(__lowercase ) A: Union[str, Any] = torch.load(__lowercase ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A: Optional[int] = original_checkpoint['''best_state'''] recursively_load_weights(__lowercase , __lowercase , __lowercase ) model.save_pretrained(__lowercase ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(__lowercase ) model.push_to_hub(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) UpperCamelCase = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
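# A minimal sketch of the wildcard renaming convention used by the MAPPING
# tables above: a "*" in an original key such as "encoder.model.*.conv.conv"
# stands for the layer index, which is recovered from the checkpoint key and
# substituted back into the HF-side name. The toy mapping entry and state-dict
# key below are hypothetical, and the index-recovery split is a simplification
# of what recursively_load_weights actually does.
toy_mapping = {"encoder.model.*.conv.conv": "encoder.layers.*.conv"}
toy_state_dict = {"encoder.model.4.conv.conv.weight": "tensor placeholder"}

for name in toy_state_dict:
    for key, mapped_key in toy_mapping.items():
        prefix, suffix = key.split(".*.")
        if prefix in name and suffix in name:
            layer_index = name.split(prefix + ".")[1].split(".")[0]  # -> "4"
            hf_name = mapped_key.replace("*", layer_index) + ".weight"
            print(f"{name} -> {hf_name}")  # encoder.layers.4.conv.weight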
334
1
'''simple docstring''' import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : Optional[int] ) -> str: '''simple docstring''' A: Optional[int] = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' with self.assertRaises(SCREAMING_SNAKE_CASE_ ): A: Tuple = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def _snake_case ( self : List[Any] ) -> Any: '''simple docstring''' with self.assertRaises(SCREAMING_SNAKE_CASE_ ): A: List[str] = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''bool''' ) , type=Value('''int64''' ) ) ) def _snake_case ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = pa.array(TypedSequence([1, 2, 3] , type=Value('''int32''' ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): A: int = pa.array(TypedSequence(['''foo''', '''bar'''] , type=Value('''int64''' ) ) ) def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A: List[str] = pa.array(TypedSequence([1, 2, 3] , try_type=Value('''int32''' ) ) ) self.assertEqual(arr.type , pa.intaa() ) def _snake_case ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' A: List[str] = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=Value('''int64''' ) ) ) self.assertEqual(arr.type , pa.string() ) def _snake_case ( self : List[str] ) -> Optional[int]: '''simple docstring''' A: List[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , '''int64''' ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) ) def _snake_case ( self : List[Any] ) -> Dict: '''simple docstring''' with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): A: Tuple = pa.array(TypedSequence(['''foo''', '''bar'''] , type=ArrayaD((1, 3) , '''int64''' ) ) ) def _snake_case ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' A: Optional[Any] = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , '''int64''' ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , '''int64''' ) ) def _snake_case ( self : str ) -> Optional[int]: '''simple docstring''' A: Dict = pa.array(TypedSequence(['''foo''', '''bar'''] , try_type=ArrayaD((1, 3) , '''int64''' ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def _snake_case ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' import PIL.Image A: List[Any] = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( '''datasets.arrow_writer.cast_to_python_objects''' , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects: A: Any = pa.array(TypedSequence([{'''path''': None, '''bytes''': b'''image_bytes'''}, pil_image] , type=Image() ) ) A , A: Tuple = mock_cast_to_python_objects.call_args_list[-1] self.assertIn('''optimize_list_casting''' , 
SCREAMING_SNAKE_CASE_ ) self.assertFalse(kwargs['''optimize_list_casting'''] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]: A: Dict = pa.BufferReader(__lowercase ) if isinstance(__lowercase , pa.Buffer ) else pa.memory_map(__lowercase ) A: List[Any] = pa.ipc.open_stream(__lowercase ) A: pa.Table = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 1_0] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: A: str = pa.BufferOutputStream() A: List[Any] = pa.schema(__lowercase ) if fields else None with ArrowWriter(stream=__lowercase , schema=__lowercase , writer_batch_size=__lowercase ) as writer: writer.write({'''col_1''': '''foo''', '''col_2''': 1} ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} ) A , A: str = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A: Optional[int] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(__lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def SCREAMING_SNAKE_CASE( ) -> Optional[int]: A: Optional[int] = pa.BufferOutputStream() A: str = Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} ) with ArrowWriter(stream=__lowercase , features=__lowercase ) as writer: writer.write({'''labels''': 0} ) writer.write({'''labels''': 1} ) A , A: List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata A: List[str] = pa.BufferReader(output.getvalue() ) A: str = pa.ipc.open_stream(__lowercase ) A: pa.Table = f.read_all() A: Any = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__lowercase ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 1_0] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: str = pa.BufferOutputStream() with ArrowWriter( stream=__lowercase , writer_batch_size=__lowercase , hash_salt='''split_name''' , check_duplicates=__lowercase , ) as writer: with pytest.raises(__lowercase ): writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] ) A , A: Tuple = writer.finalize() @pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 1_0] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict: A: Optional[Any] = pa.BufferOutputStream() with ArrowWriter( stream=__lowercase , writer_batch_size=__lowercase , hash_salt='''split_name''' , check_duplicates=__lowercase , ) as writer: with pytest.raises(__lowercase ): writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1_0 ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=1_0 ) A , A: Union[str, Any] = writer.finalize() @pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 1_0] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> str: A: Dict = pa.BufferOutputStream() with ArrowWriter( stream=__lowercase , writer_batch_size=__lowercase , hash_salt='''split_name''' , check_duplicates=__lowercase , ) as writer: writer.write({'''col_1''': '''foo''', 
'''col_2''': 1} , key=1 ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 ) A , A: Optional[int] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 1_0] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: A: Optional[int] = pa.BufferOutputStream() A: List[str] = pa.schema(__lowercase ) if fields else None with ArrowWriter(stream=__lowercase , schema=__lowercase , writer_batch_size=__lowercase ) as writer: writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) writer.write_batch({'''col_1''': [], '''col_2''': []} ) A , A: Tuple = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A: Optional[int] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(__lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 1_0] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: A: List[Any] = pa.BufferOutputStream() A: List[Any] = pa.schema(__lowercase ) if fields else None with ArrowWriter(stream=__lowercase , schema=__lowercase , writer_batch_size=__lowercase ) as writer: writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) ) A , A: Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A: List[Any] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(__lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 1_0] ) @pytest.mark.parametrize( '''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: A: Tuple = pa.BufferOutputStream() A: int = pa.schema(__lowercase ) if fields else None with ArrowWriter(stream=__lowercase , schema=__lowercase , writer_batch_size=__lowercase ) as writer: writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) ) writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) ) A , A: List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: A: Union[str, Any] = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} assert writer._schema == pa.schema(__lowercase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def SCREAMING_SNAKE_CASE( ) -> str: with tempfile.TemporaryDirectory() as tmp_dir: A: str = {'''col_1''': pa.string(), '''col_2''': pa.intaa()} A: Union[str, Any] = os.path.join(__lowercase , '''test.arrow''' ) with ArrowWriter(path=__lowercase , schema=pa.schema(__lowercase ) ) as writer: writer.write_batch({'''col_1''': 
['''foo''', '''bar'''], '''col_2''': [1, 2]} ) A , A: List[str] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__lowercase , metadata=writer._schema.metadata ) _check_output(__lowercase , 1 ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Union[str, Any]: if pa.types.is_list(__lowercase ): return get_base_dtype(arr_type.value_type ) else: return arr_type def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str: if isinstance(lst[0] , __lowercase ): change_first_primitive_element_in_list(lst[0] , __lowercase ) else: A: int = value @pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] ) @pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: Optional[Any] = pa.array(TypedSequence(__lowercase , optimized_int_type=__lowercase ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( '''col, expected_dtype''' , [ ('''attention_mask''', pa.inta()), ('''special_tokens_mask''', pa.inta()), ('''token_type_ids''', pa.inta()), ('''input_ids''', pa.intaa()), ('''other''', pa.intaa()), ] , ) @pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: # in range A: Optional[int] = pa.array(OptimizedTypedSequence(__lowercase , col=__lowercase ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications A: List[str] = copy.deepcopy(__lowercase ) A: int = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__lowercase , __lowercase ) A: int = pa.array(OptimizedTypedSequence(__lowercase , col=__lowercase ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize('''raise_exception''' , [False, True] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: A: int = str(tmp_path / '''dataset-train.arrow''' ) try: with ArrowWriter(path=__lowercase ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: A: Any = '''mock://dataset-train.arrow''' with ArrowWriter(path=__lowercase , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__lowercase ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({'''col_1''': '''foo''', '''col_2''': 1} ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} ) A , A: Optional[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__lowercase ) def SCREAMING_SNAKE_CASE( ) -> Dict: A: Any = pa.BufferOutputStream() with ParquetWriter(stream=__lowercase ) as writer: writer.write({'''col_1''': '''foo''', '''col_2''': 1} ) writer.write({'''col_1''': '''bar''', '''col_2''': 2} ) A , A: Dict = writer.finalize() assert num_examples == 2 assert num_bytes > 0 A: List[Any] = pa.BufferReader(output.getvalue() ) A: pa.Table = pq.read_table(__lowercase ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize('''embed_local_files''' , [False, True] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: import PIL.Image A: int = str(tmp_path / '''test_image_rgb.jpg''' ) 
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__lowercase , format='''png''' ) A: List[Any] = pa.BufferOutputStream() with ParquetWriter( stream=__lowercase , features=Features({'''image''': Image()} ) , embed_local_files=__lowercase ) as writer: writer.write({'''image''': image_path} ) writer.finalize() A: Union[str, Any] = pa.BufferReader(output.getvalue() ) A: pa.Table = pq.read_table(__lowercase ) A: int = pa_table.to_pydict() if embed_local_files: assert isinstance(out['''image'''][0]['''path'''] , __lowercase ) with open(__lowercase , '''rb''' ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def SCREAMING_SNAKE_CASE( ) -> str: A: Union[str, Any] = pa.schema([pa.field('''col_1''' , pa.string() , nullable=__lowercase )] ) A: str = pa.BufferOutputStream() with ArrowWriter(stream=__lowercase ) as writer: writer._build_writer(inferred_schema=__lowercase ) assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
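# A small usage sketch distilled from the tests above, assuming the same
# datasets.arrow_writer.ArrowWriter API they exercise: write two rows to an
# in-memory Arrow stream, finalize, and read the table back with pyarrow.
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

def _example_round_trip():
    stream = pa.BufferOutputStream()
    with ArrowWriter(stream=stream) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    # read the stream back into a table and check the round trip
    table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
    assert num_examples == 2
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}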
334
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
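# What the _LazyModule indirection above buys, in a self-contained miniature:
# a module-level __getattr__ (PEP 562) defers the real import until an
# attribute is first accessed, so importing the package stays cheap even when
# the torch/TF/Flax backends are heavy. This is a sketch of the mechanism, not
# the actual _LazyModule implementation; json stands in for a heavy backend.
import importlib
import types

def make_lazy_module(name, import_structure):
    module = types.ModuleType(name)

    def __getattr__(attr):  # consulted only when normal lookup fails
        for submodule, symbols in import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(f"module {name!r} has no attribute {attr!r}")

    module.__getattr__ = __getattr__
    return module

lazy = make_lazy_module("lazy_demo", {"json": ["dumps", "loads"]})
assert lazy.dumps({"ok": True}) == '{"ok": true}'  # json imported only here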
334
1
'''simple docstring''' import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated UpperCamelCase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ UpperCamelCase = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Tuple = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowercase )[0] @deprecated(__lowercase , '''Please use tf.data to implement this functionality.''' ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowercase ) as bytestream: A: Union[str, Any] = _readaa(__lowercase ) if magic != 2_0_5_1: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) A: Any = _readaa(__lowercase ) A: int = _readaa(__lowercase ) A: Optional[int] = _readaa(__lowercase ) A: Optional[int] = bytestream.read(rows * cols * num_images ) A: Tuple = numpy.frombuffer(__lowercase , dtype=numpy.uinta ) A: Optional[int] = data.reshape(__lowercase , __lowercase , __lowercase , 1 ) return data @deprecated(__lowercase , '''Please use tf.one_hot on tensors.''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[Any]: A: int = labels_dense.shape[0] A: Union[str, Any] = numpy.arange(__lowercase ) * num_classes A: Optional[Any] = numpy.zeros((num_labels, num_classes) ) A: Dict = 1 return labels_one_hot @deprecated(__lowercase , '''Please use tf.data to implement this functionality.''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=1_0 ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowercase ) as bytestream: A: List[str] = _readaa(__lowercase ) if magic != 2_0_4_9: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) A: Optional[int] = _readaa(__lowercase ) A: Tuple = bytestream.read(__lowercase ) A: str = numpy.frombuffer(__lowercase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowercase , __lowercase ) return labels class lowerCAmelCase_ : '''simple docstring''' @deprecated( SCREAMING_SNAKE_CASE_ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : int=dtypes.floataa , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Optional[int]=None , ) -> Optional[int]: '''simple docstring''' A , A: Optional[int] = random_seed.get_seed(SCREAMING_SNAKE_CASE_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) A: Optional[int] = dtypes.as_dtype(SCREAMING_SNAKE_CASE_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: A: List[str] = 1_00_00 A: Optional[Any] = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f"""images.shape: {images.shape} labels.shape: {labels.shape}""" A: Optional[int] = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, 
rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 A: List[Any] = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. A: str = images.astype(numpy.floataa ) A: str = numpy.multiply(SCREAMING_SNAKE_CASE_ , 1.0 / 255.0 ) A: Union[str, Any] = images A: Union[str, Any] = labels A: List[str] = 0 A: Tuple = 0 @property def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' return self._images @property def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' return self._labels @property def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return self._num_examples @property def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' return self._epochs_completed def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : str=True ) -> Union[str, Any]: '''simple docstring''' if fake_data: A: List[str] = [1] * 7_84 A: Tuple = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE_ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE_ )], ) A: List[Any] = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: A: str = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = self.images[perma] A: Union[str, Any] = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch A: List[str] = self._num_examples - start A: Tuple = self._images[start : self._num_examples] A: str = self._labels[start : self._num_examples] # Shuffle the data if shuffle: A: Optional[int] = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = self.images[perm] A: Optional[int] = self.labels[perm] # Start next epoch A: List[str] = 0 A: List[str] = batch_size - rest_num_examples A: str = self._index_in_epoch A: Tuple = self._images[start:end] A: Union[str, Any] = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size A: List[str] = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowercase , '''Please write your own downloading logic.''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> str: if not gfile.Exists(__lowercase ): gfile.MakeDirs(__lowercase ) A: Any = os.path.join(__lowercase , __lowercase ) if not gfile.Exists(__lowercase ): urllib.request.urlretrieve(__lowercase , __lowercase ) # noqa: S310 with gfile.GFile(__lowercase ) as f: A: Union[str, Any] = f.size() print('''Successfully downloaded''' , __lowercase , __lowercase , '''bytes.''' ) return filepath @deprecated( __lowercase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=False , __lowercase=dtypes.floataa , __lowercase=True , __lowercase=5_0_0_0 , __lowercase=None , __lowercase=DEFAULT_SOURCE_URL , ) -> int: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowercase , one_hot=__lowercase , dtype=__lowercase , seed=__lowercase ) A: str = fake() A: Union[str, Any] = fake() A: List[Any] = 
fake() return _Datasets(train=__lowercase , validation=__lowercase , test=__lowercase ) if not source_url: # empty string check A: Optional[Any] = DEFAULT_SOURCE_URL A: Any = '''train-images-idx3-ubyte.gz''' A: int = '''train-labels-idx1-ubyte.gz''' A: str = '''t10k-images-idx3-ubyte.gz''' A: Dict = '''t10k-labels-idx1-ubyte.gz''' A: Tuple = _maybe_download( __lowercase , __lowercase , source_url + train_images_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: int = _extract_images(__lowercase ) A: Dict = _maybe_download( __lowercase , __lowercase , source_url + train_labels_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: Union[str, Any] = _extract_labels(__lowercase , one_hot=__lowercase ) A: Optional[Any] = _maybe_download( __lowercase , __lowercase , source_url + test_images_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: List[Any] = _extract_images(__lowercase ) A: Tuple = _maybe_download( __lowercase , __lowercase , source_url + test_labels_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: Optional[Any] = _extract_labels(__lowercase , one_hot=__lowercase ) if not 0 <= validation_size <= len(__lowercase ): A: Optional[Any] = ( '''Validation size should be between 0 and ''' F"""{len(__lowercase )}. Received: {validation_size}.""" ) raise ValueError(__lowercase ) A: List[Any] = train_images[:validation_size] A: Union[str, Any] = train_labels[:validation_size] A: str = train_images[validation_size:] A: Dict = train_labels[validation_size:] A: int = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} A: int = _DataSet(__lowercase , __lowercase , **__lowercase ) A: Optional[int] = _DataSet(__lowercase , __lowercase , **__lowercase ) A: List[str] = _DataSet(__lowercase , __lowercase , **__lowercase ) return _Datasets(train=__lowercase , validation=__lowercase , test=__lowercase )
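# Self-contained sketch of the dense -> one-hot conversion that
# _dense_to_one_hot above implements with numpy.arange row offsets and flat
# indexing; the assignment into labels_one_hot.flat is the step the mangled
# "A: Dict = 1" line stands for.
import numpy

def dense_to_one_hot_demo(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes  # start of each row
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

expected = numpy.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
assert (dense_to_one_hot_demo(numpy.array([0, 2]), 3) == expected).all()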
334
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[float]]: A: list[list[float]] = [] for data in source_data: for i, el in enumerate(__lowercase ): if len(__lowercase ) < i + 1: data_lists.append([] ) data_lists[i].append(float(__lowercase ) ) return data_lists def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]: A: list[list[float]] = [] for dlist, weight in zip(__lowercase , __lowercase ): A: List[str] = min(__lowercase ) A: Union[str, Any] = max(__lowercase ) A: list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: A: List[str] = F"""Invalid weight of {weight:f} provided""" raise ValueError(__lowercase ) score_lists.append(__lowercase ) return score_lists def SCREAMING_SNAKE_CASE( __lowercase ) -> list[float]: A: list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(__lowercase ): A: str = final_scores[j] + ele return final_scores def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]: A: Any = get_data(__lowercase ) A: str = calculate_each_score(__lowercase , __lowercase ) A: int = generate_final_scores(__lowercase ) # append scores to source data for i, ele in enumerate(__lowercase ): source_data[i].append(__lowercase ) return source_data
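# Worked example of the scoring scheme above, inlined so it does not depend on
# the shadowed SCREAMING_SNAKE_CASE names: weight 0 means "lower is better"
# (score = 1 - normalized value), weight 1 means "higher is better" (score =
# normalized value), and the per-column scores are summed row by row. The
# price/rating values are hypothetical.
prices = [20.0, 40.0]   # weight 0 column: cheaper is better
ratings = [1.0, 3.0]    # weight 1 column: higher is better
lo, hi = min(prices), max(prices)
price_scores = [1 - (p - lo) / (hi - lo) for p in prices]     # [1.0, 0.0]
lo, hi = min(ratings), max(ratings)
rating_scores = [(r - lo) / (hi - lo) for r in ratings]       # [0.0, 1.0]
final_scores = [p + r for p, r in zip(price_scores, rating_scores)]  # [1.0, 1.0]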
334
1
'''simple docstring''' from manim import * class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : int ) -> int: '''simple docstring''' A: List[str] = Rectangle(height=0.5 , width=0.5 ) A: str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) A: List[str] = Rectangle(height=0.25 , width=0.25 ) A: List[Any] = [mem.copy() for i in range(6 )] A: int = [mem.copy() for i in range(6 )] A: Optional[Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: Any = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: int = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: Union[str, Any] = Text('''CPU''' , font_size=24 ) A: str = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(SCREAMING_SNAKE_CASE_ ) A: List[str] = [mem.copy() for i in range(4 )] A: Union[str, Any] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: Any = Text('''GPU''' , font_size=24 ) A: Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ ) gpu.move_to([-1, -1, 0] ) self.add(SCREAMING_SNAKE_CASE_ ) A: Dict = [mem.copy() for i in range(6 )] A: Optional[int] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: int = Text('''Model''' , font_size=24 ) A: str = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ ) model.move_to([3, -1.0, 0] ) self.add(SCREAMING_SNAKE_CASE_ ) A: Any = [] A: str = [] for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 ) target.move_to(SCREAMING_SNAKE_CASE_ ) model_arr.append(SCREAMING_SNAKE_CASE_ ) A: str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(SCREAMING_SNAKE_CASE_ ) self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) A: int = [meta_mem.copy() for i in range(6 )] A: Dict = [meta_mem.copy() for i in range(6 )] A: int = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: Optional[int] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: List[str] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 ) A: List[Any] = Text('''Disk''' , font_size=24 ) A: Union[str, Any] = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ ) disk.move_to([-4, -1.25, 0] ) self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A: Any = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(SCREAMING_SNAKE_CASE_ ) A: str = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 
0] ) self.play(Write(SCREAMING_SNAKE_CASE_ ) ) A: Union[str, Any] = Square(0.3 ) input.set_fill(SCREAMING_SNAKE_CASE_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , SCREAMING_SNAKE_CASE_ , buff=0.5 ) self.play(Write(SCREAMING_SNAKE_CASE_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.02 ) self.play(MoveToTarget(SCREAMING_SNAKE_CASE_ ) ) self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) ) A: Dict = Arrow(start=SCREAMING_SNAKE_CASE_ , end=SCREAMING_SNAKE_CASE_ , color=SCREAMING_SNAKE_CASE_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , SCREAMING_SNAKE_CASE_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) A: Optional[int] = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) ) A: Union[str, Any] = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02} self.play( Write(SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_cpu_arr[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) A: Any = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , SCREAMING_SNAKE_CASE_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) A: Tuple = AnimationGroup( FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , FadeIn(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(SCREAMING_SNAKE_CASE_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: A: Optional[Any] = 0.7 self.play( Circumscribe(model_arr[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i] , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(model_arr[i + 1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(cpu_left_col_base[-1] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , Circumscribe(gpu_rect[0] , color=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) A: Union[str, Any] = a_c A: Optional[int] = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(SCREAMING_SNAKE_CASE_ ) , FadeOut(SCREAMING_SNAKE_CASE_ , run_time=0.5 ) , ) A: Tuple = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully 
completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , MoveToTarget(SCREAMING_SNAKE_CASE_ ) ) self.wait()
334
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''vocab_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': 512, '''facebook/dpr-ctx_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': 512, '''facebook/dpr-question_encoder-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': 512, '''facebook/dpr-reader-multiset-base''': 512, } UpperCamelCase = { '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True}, } UpperCamelCase = { '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True}, '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True}, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES UpperCamelCase_ : Union[str, Any] = 
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = DPRContextEncoderTokenizer class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Dict = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Optional[int] = DPRQuestionEncoderTokenizer UpperCamelCase = collections.namedtuple( '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text'''] ) UpperCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits''']) UpperCamelCase = R''' Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. 
This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `\'tf\'`: Return TensorFlow `tf.constant` objects. - `\'pt\'`: Return PyTorch `torch.Tensor` objects. - `\'np\'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer\'s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. ''' @add_start_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ : '''simple docstring''' def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) elif titles is None or texts is None: A: Union[str, Any] = titles if texts is None else texts return super().__call__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Union[str, Any] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles] A: Optional[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts] A: str = len(SCREAMING_SNAKE_CASE_ ) A: List[Any] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages assert len(SCREAMING_SNAKE_CASE_ ) == len( SCREAMING_SNAKE_CASE_ ), f"""There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts.""" A: Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: Dict = super().__call__(SCREAMING_SNAKE_CASE_ , 
add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids'''] A: str = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] } if return_attention_mask is not False: A: Union[str, Any] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) A: Optional[Any] = attention_mask return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : BatchEncoding , SCREAMING_SNAKE_CASE_ : DPRReaderOutput , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : int = 64 , SCREAMING_SNAKE_CASE_ : int = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Any = reader_input['''input_ids'''] A , A , A: str = reader_output[:3] A: str = len(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ ) A: List[DPRReaderOutput] = [] for doc_id in sorted_docs: A: List[str] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence A: Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: A: Union[str, Any] = sequence_ids.index(self.pad_token_id ) else: A: int = len(SCREAMING_SNAKE_CASE_ ) A: Dict = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(SCREAMING_SNAKE_CASE_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ) -> List[DPRSpanPrediction]: '''simple docstring''' A: Union[str, Any] = [] for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) A: Any = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ ) A: Dict = [] for (start_index, end_index), score in scores: assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]""" A: int = end_index - start_index + 1 assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}""" if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals 
): continue chosen_span_intervals.append((start_index, end_index) ) if len(SCREAMING_SNAKE_CASE_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Dict = READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : Any = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : Optional[Any] = DPRReaderTokenizer
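# Self-contained sketch of the span-selection rule in _get_best_spans above:
# score every candidate (start, end) pair with start_logit + end_logit, sort
# descending, and keep the top spans that neither contain nor fall inside an
# already chosen span.
def best_non_overlapping_spans(start_logits, end_logits, max_answer_length, top_spans):
    scores = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        if any(s <= cs <= ce <= e or cs <= s <= e <= ce for cs, ce in chosen):
            continue  # overlaps a better span already kept
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

assert best_non_overlapping_spans([3.0, 0.0], [0.0, 2.0], 2, 1) == [(0, 1)]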
334
1
'''simple docstring''' import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py UpperCamelCase = '''src/transformers''' UpperCamelCase = '''docs/source/en''' UpperCamelCase = '''.''' def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: with open(__lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A: str = f.readlines() # Find the start prompt. A: Union[str, Any] = 0 while not lines[start_index].startswith(__lowercase ): start_index += 1 start_index += 1 A: Union[str, Any] = start_index while not lines[end_index].startswith(__lowercase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | UpperCamelCase = '''Model|Encoder|Decoder|ForConditionalGeneration''' # Regexes that match TF/Flax/PT model names. UpperCamelCase = re.compile(R'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') UpperCamelCase = re.compile(R'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. UpperCamelCase = re.compile(R'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # This is to make sure the transformers module imported is the one in the repo. UpperCamelCase = direct_transformers_import(TRANSFORMERS_PATH) def SCREAMING_SNAKE_CASE( __lowercase ) -> Any: A: Union[str, Any] = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , __lowercase ) return [m.group(0 ) for m in matches] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[Any]: A: List[Any] = 2 if text == '''✅''' or text == '''❌''' else len(__lowercase ) A: Union[str, Any] = (width - text_length) // 2 A: Union[str, Any] = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def SCREAMING_SNAKE_CASE( ) -> Optional[int]: A: Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES A: Optional[int] = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } A: str = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. A: Dict = collections.defaultdict(__lowercase ) A: Dict = collections.defaultdict(__lowercase ) A: str = collections.defaultdict(__lowercase ) A: List[Any] = collections.defaultdict(__lowercase ) A: str = collections.defaultdict(__lowercase ) # Let's lookup through all transformers object (once). 
for attr_name in dir(__lowercase ): A: Any = None if attr_name.endswith('''Tokenizer''' ): A: List[str] = slow_tokenizers A: Tuple = attr_name[:-9] elif attr_name.endswith('''TokenizerFast''' ): A: Union[str, Any] = fast_tokenizers A: int = attr_name[:-1_3] elif _re_tf_models.match(__lowercase ) is not None: A: List[Any] = tf_models A: List[Any] = _re_tf_models.match(__lowercase ).groups()[0] elif _re_flax_models.match(__lowercase ) is not None: A: int = flax_models A: Optional[int] = _re_flax_models.match(__lowercase ).groups()[0] elif _re_pt_models.match(__lowercase ) is not None: A: Optional[Any] = pt_models A: Tuple = _re_pt_models.match(__lowercase ).groups()[0] if lookup_dict is not None: while len(__lowercase ) > 0: if attr_name in model_name_to_prefix.values(): A: Dict = True break # Try again after removing the last word in the name A: Dict = ''''''.join(camel_case_split(__lowercase )[:-1] ) # Let's build that table! A: List[Any] = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) A: int = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support'''] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). A: Tuple = [len(__lowercase ) + 2 for c in columns] A: List[str] = max([len(__lowercase ) for name in model_names] ) + 2 # Build the table per se A: Union[str, Any] = '''|''' + '''|'''.join([_center_text(__lowercase , __lowercase ) for c, w in zip(__lowercase , __lowercase )] ) + '''|\n''' # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n" A: Optional[int] = {True: '''✅''', False: '''❌'''} for name in model_names: A: Union[str, Any] = model_name_to_prefix[name] A: Tuple = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__lowercase , __lowercase ) for l, w in zip(__lowercase , __lowercase )] ) + "|\n" return table def SCREAMING_SNAKE_CASE( __lowercase=False ) -> List[Any]: A , A , A , A: List[Any] = _find_text_in_file( filename=os.path.join(__lowercase , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , ) A: Optional[int] = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__lowercase , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') UpperCamelCase = parser.parse_args() check_model_table(args.fix_and_overwrite)
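# Quick illustration of the camel-case splitter used above: the lookaround
# regex breaks before a capital that follows a lowercase letter, and between
# an acronym run and a following capitalized word, so acronyms stay together.
import re

def camel_case_split_demo(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

assert camel_case_split_demo("TFBertModel") == ["TF", "Bert", "Model"]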
334
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the tokenizer only when sentencepiece is installed.
    _import_structure['''tokenization_gpt_sw3'''] = ['''GPTSw3Tokenizer''']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Defer all imports until an attribute is actually accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = '''▁''' UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model'''} UpperCamelCase = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model''' ), } } UpperCamelCase = { '''facebook/nllb-200-distilled-600M''': 1024, } # fmt: off UpperCamelCase = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', 
'''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : str = VOCAB_FILES_NAMES UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : List[int] = [] UpperCamelCase_ : List[int] = [] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict="<s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : int="<s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE_ : int="<pad>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="<mask>" , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[str]=False , **SCREAMING_SNAKE_CASE_ : Any , ) -> str: '''simple docstring''' A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token A: str = {} if sp_model_kwargs is None else sp_model_kwargs A: Any = legacy_behaviour super().__init__( bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , src_lang=SCREAMING_SNAKE_CASE_ , tgt_lang=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) A: Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token A: Union[str, Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A: Optional[Any] = 1 A: Optional[Any] = len(self.sp_model ) A: List[str] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE_ ) } A: str = {v: k for k, v in self.lang_code_to_id.items()} A: List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) A: List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} A: str = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) A: Union[str, Any] = src_lang if src_lang is not None else '''eng_Latn''' A: Optional[Any] = self.lang_code_to_id[self._src_lang] A: Optional[Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Dict ) -> str: '''simple docstring''' A: Dict = self.__dict__.copy() A: Optional[Any] = None A: Dict = self.sp_model.serialized_model_proto() return state def __setstate__( self : Any , SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: '''simple docstring''' A: str = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A: List[str] = {} A: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _snake_case ( self : List[Any] ) -> List[Any]: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _snake_case ( self : Optional[int] ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> None: '''simple docstring''' A: Optional[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = [1] * len(self.prefix_tokens ) A: Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE_ )) + ([0] * len(SCREAMING_SNAKE_CASE_ )) + suffix_ones def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: List[Any] = [self.sep_token_id] A: int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] , SCREAMING_SNAKE_CASE_ : Optional[str] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Dict: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) A: Optional[int] = src_lang A: Union[str, Any] = self(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: int = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) A: List[Any] = tgt_lang_id return inputs def _snake_case ( self : List[Any] ) -> Any: '''simple docstring''' A: List[str] = 
{self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Tuple: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[str]: '''simple docstring''' A: str = ''''''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ , ''' ''' ).strip() return out_string def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: str = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi: A: Dict = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str = "eng_Latn" , SCREAMING_SNAKE_CASE_ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE_ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> BatchEncoding: '''simple docstring''' A: List[Any] = src_lang A: Optional[Any] = tgt_lang return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def _snake_case ( self : Optional[Any] ) -> str: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ) -> None: '''simple docstring''' A: Dict = self.lang_code_to_id[src_lang] if self.legacy_behaviour: A: Optional[Any] = [] A: Any = [self.eos_token_id, self.cur_lang_code] else: A: Optional[int] = [self.cur_lang_code] A: Dict = [self.eos_token_id] def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> None: '''simple docstring''' A: str = self.lang_code_to_id[lang] if self.legacy_behaviour: A: List[Any] = [] A: int = [self.eos_token_id, self.cur_lang_code] else: A: Dict = [self.cur_lang_code] A: List[Any] = [self.eos_token_id]
334
'''simple docstring'''
from __future__ import annotations

from typing import Any, Iterator


class ContainsLoopError(Exception):
    '''simple docstring'''

    pass


class Node:
    '''simple docstring'''

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        # Walk the list, remembering every node seen; revisiting one means a loop.
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        '''simple docstring'''
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
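Design note: `has_loop` above keeps a `visited` list, which costs O(n) extra memory and O(n^2) time in the worst case, since the `in` check is a linear scan. A sketch of Floyd's tortoise-and-hare alternative, which detects a cycle in O(1) memory (assumes the `Node` class above):

def has_loop_floyd(head: Node | None) -> bool:
    # Advance one pointer by one step and another by two; they meet iff there is a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False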
334
1
'''simple docstring''' import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' A: List[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : List[Any] ) -> List[Any]: '''simple docstring''' A: int = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : str ) -> Any: '''simple docstring''' A: Optional[Any] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' A: List[str] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : str ) -> List[Any]: '''simple docstring''' A: Dict = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : str ) -> Optional[int]: '''simple docstring''' A: List[Any] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] A: int = '''fp16''' self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A: Any = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] A: int = '''fp16''' self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: Union[str, Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] A: List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Dict ) -> List[str]: '''simple docstring''' A: Optional[int] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', 
'''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] A: Optional[Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : List[Any] ) -> Tuple: '''simple docstring''' A: List[str] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] A: Optional[int] = '''fp16''' self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' A: List[Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] A: int = '''fp16''' self.assertTrue(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' A: List[str] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] A: Tuple = '''fp16''' self.assertFalse(is_safetensors_compatible(SCREAMING_SNAKE_CASE_ , variant=SCREAMING_SNAKE_CASE_ ) )
334
'''simple docstring'''
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
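A sanity check relating the three rotations (uses the helpers defined above; none of them mutate their input):

# Three 90-degree counterclockwise rotations equal one 270-degree rotation,
# and two of them equal one 180-degree rotation.
assert rotate_270(make_matrix()) == rotate_90(rotate_90(rotate_90(make_matrix())))
assert rotate_180(make_matrix()) == rotate_90(rotate_90(make_matrix()))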
334
1
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase = 1 , __lowercase = 1 , __lowercase = 1.0E4 , __lowercase = False , __lowercase = 1.0 , ) -> jnp.ndarray: assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even""" A: Optional[int] = float(embedding_dim // 2 ) A: Dict = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) A: str = min_timescale * jnp.exp(jnp.arange(__lowercase , dtype=jnp.floataa ) * -log_timescale_increment ) A: str = jnp.expand_dims(__lowercase , 1 ) * jnp.expand_dims(__lowercase , 0 ) # scale embeddings A: Optional[int] = scale * emb if flip_sin_to_cos: A: Union[str, Any] = jnp.concatenate([jnp.cos(__lowercase ), jnp.sin(__lowercase )] , axis=1 ) else: A: List[str] = jnp.concatenate([jnp.sin(__lowercase ), jnp.cos(__lowercase )] , axis=1 ) A: Dict = jnp.reshape(__lowercase , [jnp.shape(__lowercase )[0], embedding_dim] ) return signal class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' UpperCamelCase_ : int = 32 UpperCamelCase_ : jnp.dtype = jnp.floataa @nn.compact def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict: '''simple docstring''' A: Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(SCREAMING_SNAKE_CASE_ ) A: List[Any] = nn.silu(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(SCREAMING_SNAKE_CASE_ ) return temb class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' UpperCamelCase_ : int = 32 UpperCamelCase_ : bool = False UpperCamelCase_ : float = 1 @nn.compact def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]: '''simple docstring''' return get_sinusoidal_embeddings( SCREAMING_SNAKE_CASE_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
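A minimal NumPy sketch of the same sinusoidal timestep embedding, for intuition. It assumes the defaults above (min_timescale=1, freq_shift=1, scale=1, flip_sin_to_cos=False); the names here are illustrative, not part of the module:

import numpy as np

def sinusoidal_embeddings_np(timesteps: np.ndarray, embedding_dim: int, max_timescale: float = 1e4) -> np.ndarray:
    # Geometric progression of frequencies from 1 down to 1/max_timescale.
    half = embedding_dim // 2
    inv_timescales = np.exp(np.arange(half) * -(np.log(max_timescale) / (half - 1)))
    emb = timesteps[:, None].astype(np.float32) * inv_timescales[None, :]
    # Concatenate sin then cos, giving shape (len(timesteps), embedding_dim).
    return np.concatenate([np.sin(emb), np.cos(emb)], axis=1)

print(sinusoidal_embeddings_np(np.array([0, 10, 100]), 8).shape)  # (3, 8)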
334
'''simple docstring'''
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    # Element-wise max(0, x): negative entries are clamped to zero.
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
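For completeness, the subgradient used during backpropagation is just as simple (a sketch; by convention the value at x == 0 is taken as 0 here):

def relu_derivative(vector) -> np.ndarray:
    # 1 where the input was positive, 0 elsewhere.
    return (np.asarray(vector) > 0).astype(float)

print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]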
334
1
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # Ascii85-encode the UTF-8 bytes of ``string``.
    return base64.a85encode(string.encode('''utf-8'''))


def base85_decode(a85encoded: bytes) -> str:
    # Decode Ascii85 bytes back to a UTF-8 string.
    return base64.a85decode(a85encoded).decode('''utf-8''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
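A round-trip usage sketch (relies only on the two helpers above; Ascii85 via base64.a85encode):

encoded = base85_encode('''some text''')
print(encoded)  # Ascii85-encoded bytes
assert base85_decode(encoded) == '''some text'''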
334
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) UpperCamelCase = { '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''], '''processing_speech_to_text''': ['''Speech2TextProcessor'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''Speech2TextTokenizer'''] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''Speech2TextFeatureExtractor'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSpeech2TextForConditionalGeneration''', '''TFSpeech2TextModel''', '''TFSpeech2TextPreTrainedModel''', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Speech2TextForConditionalGeneration''', '''Speech2TextModel''', '''Speech2TextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) # TODO Update this UpperCamelCase = { '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''', # See all ESM models at https://huggingface.co/models?filter=esm } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : List[Any] = """esm""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : int=7_68 , SCREAMING_SNAKE_CASE_ : Any=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=30_72 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.1 , SCREAMING_SNAKE_CASE_ : int=10_26 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1E-12 , SCREAMING_SNAKE_CASE_ : Optional[Any]="absolute" , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: Dict = vocab_size A: List[Any] = hidden_size A: int = num_hidden_layers A: List[Any] = num_attention_heads A: Union[str, Any] = intermediate_size A: Optional[int] = hidden_dropout_prob A: Union[str, Any] = attention_probs_dropout_prob A: List[str] = max_position_embeddings A: Any = initializer_range A: Union[str, Any] = layer_norm_eps A: Any = position_embedding_type A: List[str] = use_cache A: Dict = emb_layer_norm_before A: Dict = token_dropout A: List[Any] = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) A: List[Any] = EsmFoldConfig() elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: List[str] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) A: List[Any] = get_default_vocab_list() else: A: Union[str, Any] = vocab_list else: A: Optional[int] = None A: Optional[Any] = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , SCREAMING_SNAKE_CASE_ ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def _snake_case ( self : str ) -> int: '''simple docstring''' A: List[Any] = super().to_dict() if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ): A: int = self.esmfold_config.to_dict() return output @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : str = None UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : bool = False UpperCamelCase_ : bool = False UpperCamelCase_ : float = 0 UpperCamelCase_ : bool = True UpperCamelCase_ : bool = False UpperCamelCase_ : int = 128 UpperCamelCase_ : "TrunkConfig" = None def _snake_case ( self : List[Any] ) -> int: '''simple docstring''' if self.trunk is None: A: Optional[Any] = TrunkConfig() elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ): A: List[Any] = 
TrunkConfig(**self.trunk ) def _snake_case ( self : str ) -> int: '''simple docstring''' A: Optional[Any] = asdict(self ) A: List[str] = self.trunk.to_dict() return output @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : int = 48 UpperCamelCase_ : int = 1024 UpperCamelCase_ : int = 128 UpperCamelCase_ : int = 32 UpperCamelCase_ : int = 32 UpperCamelCase_ : int = 32 UpperCamelCase_ : float = 0 UpperCamelCase_ : float = 0 UpperCamelCase_ : bool = False UpperCamelCase_ : int = 4 UpperCamelCase_ : Optional[int] = 128 UpperCamelCase_ : "StructureModuleConfig" = None def _snake_case ( self : str ) -> str: '''simple docstring''' if self.structure_module is None: A: Dict = StructureModuleConfig() elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ): A: str = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" ) A: Any = self.sequence_state_dim // self.sequence_head_width A: Tuple = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" ) if self.dropout >= 0.4: raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: int = asdict(self ) A: Tuple = self.structure_module.to_dict() return output @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : int = 384 UpperCamelCase_ : int = 128 UpperCamelCase_ : int = 16 UpperCamelCase_ : int = 128 UpperCamelCase_ : int = 12 UpperCamelCase_ : int = 4 UpperCamelCase_ : int = 8 UpperCamelCase_ : float = 0.1 UpperCamelCase_ : int = 8 UpperCamelCase_ : int = 1 UpperCamelCase_ : int = 2 UpperCamelCase_ : int = 7 UpperCamelCase_ : int = 10 UpperCamelCase_ : float = 1e-8 UpperCamelCase_ : float = 1e5 def _snake_case ( self : int ) -> str: '''simple docstring''' return asdict(self ) def SCREAMING_SNAKE_CASE( ) -> Tuple: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
334
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
334
1
'''simple docstring'''


def jaro_winkler(str1: str, str2: str) -> float:
    '''simple docstring'''

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            # Only characters within ``limit`` positions of each other can match.
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # Blank out the matched character so it cannot be matched twice.
                _str2 = f"""{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"""
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('''hello''', '''world'''))
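A worked example on the canonical pair ("martha", "marhta"), tracing the formula above:

# matches = 6, transpositions = 1
#   jaro = 1/3 * (6/6 + 6/6 + (6 - 1)/6) = 0.9444...
# common prefix "mar" has length 3
#   jaro_winkler = 0.9444 + 0.1 * 3 * (1 - 0.9444) = 0.9611...
print(jaro_winkler('''martha''', '''marhta'''))  # 0.9611111111111111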
334
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' @register_to_config def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple: '''simple docstring''' super().__init__() A: Optional[Any] = sample_size # time if time_embedding_type == "fourier": A: Tuple = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) A: List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": A: str = Timesteps( block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) A: Any = block_out_channels[0] if use_timestep_embedding: A: Optional[Any] = block_out_channels[0] * 4 A: List[Any] = TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , ) A: Optional[Any] = nn.ModuleList([] ) A: str = None A: str = nn.ModuleList([] ) A: Tuple = None # down A: Any = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = output_channel A: List[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[int] = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid A: Union[str, Any] = get_mid_block( SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , ) # up A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) ) A: List[str] = reversed_block_out_channels[0] if out_block_type is None: A: int = out_channels else: A: Union[str, Any] = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: List[Any] = output_channel A: int = ( 
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[Any] = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) A: Any = output_channel # out A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) A: Optional[int] = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]: '''simple docstring''' A: Any = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: A: List[str] = timesteps[None].to(sample.device ) A: int = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: A: str = timestep_embed[..., None] A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down A: List[str] = () for downsample_block in self.down_blocks: A , A: Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): A: List[Any] = down_block_res_samples[-1:] A: List[str] = down_block_res_samples[:-1] A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
334
1
'''simple docstring'''


def or_gate(input_1: int, input_2: int) -> int:
    # The output is 1 when at least one input is 1.
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
334
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
334
1
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: Tuple = [label.strip() for label in labels.split(''',''' ) if label.strip()] return labels def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ) -> Dict: '''simple docstring''' if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: raise ValueError('''You must include at least one label and at least one sequence.''' ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. ''' '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.''' ).format(SCREAMING_SNAKE_CASE_ ) ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: Optional[int] = [sequences] A: str = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(SCREAMING_SNAKE_CASE_ )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any]=ZeroShotClassificationArgumentHandler() , *SCREAMING_SNAKE_CASE_ : Dict , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Any: '''simple docstring''' A: Any = args_parser super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.entailment_id == -1: logger.warning( '''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to ''' '''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' ) @property def _snake_case ( self : Union[str, Any] ) -> int: '''simple docstring''' for label, ind in self.model.config.labelaid.items(): if label.lower().startswith('''entail''' ): return ind return -1 def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : Any=TruncationStrategy.ONLY_FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]: '''simple docstring''' A: Optional[int] = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( '''Tokenizer was not supporting padding necessary for zero-shot, attempting to use ''' ''' `pad_token=eos_token`''' ) A: int = self.tokenizer.eos_token try: A: Any = self.tokenizer( SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , ) except Exception as e: if "too short" in str(SCREAMING_SNAKE_CASE_ ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. 
A: str = self.tokenizer( SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : Dict ) -> Dict: '''simple docstring''' if kwargs.get('''multi_class''' , SCREAMING_SNAKE_CASE_ ) is not None: A: Dict = kwargs['''multi_class'''] logger.warning( '''The `multi_class` argument has been deprecated and renamed to `multi_label`. ''' '''`multi_class` will be removed in a future version of Transformers.''' ) A: Dict = {} if "candidate_labels" in kwargs: A: Optional[int] = self._args_parser._parse_labels(kwargs['''candidate_labels'''] ) if "hypothesis_template" in kwargs: A: Tuple = kwargs['''hypothesis_template'''] A: Optional[Any] = {} if "multi_label" in kwargs: A: Union[str, Any] = kwargs['''multi_label'''] return preprocess_params, {}, postprocess_params def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Optional[Any] , ) -> int: '''simple docstring''' if len(SCREAMING_SNAKE_CASE_ ) == 0: pass elif len(SCREAMING_SNAKE_CASE_ ) == 1 and "candidate_labels" not in kwargs: A: Optional[Any] = args[0] else: raise ValueError(f"""Unable to understand extra arguments {args}""" ) return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]="This example is {}." ) -> Dict: '''simple docstring''' A , A: Optional[int] = self._args_parser(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for i, (candidate_label, sequence_pair) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): A: Tuple = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(SCREAMING_SNAKE_CASE_ ) - 1, **model_input, } def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]: '''simple docstring''' A: Any = inputs['''candidate_label'''] A: Tuple = inputs['''sequence'''] A: List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names} A: List[str] = self.model(**SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = { '''candidate_label''': candidate_label, '''sequence''': sequence, '''is_last''': inputs['''is_last'''], **outputs, } return model_outputs def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False ) -> Dict: '''simple docstring''' A: List[str] = [outputs['''candidate_label'''] for outputs in model_outputs] A: int = [outputs['''sequence'''] for outputs in model_outputs] A: Tuple = np.concatenate([output['''logits'''].numpy() for output in model_outputs] ) A: str = logits.shape[0] A: str = len(SCREAMING_SNAKE_CASE_ ) A: List[str] = N // n A: Union[str, Any] = logits.reshape((num_sequences, n, -1) ) if multi_label or len(SCREAMING_SNAKE_CASE_ ) == 1: # softmax over the entailment vs. 
contradiction dim for each label independently A: Union[str, Any] = self.entailment_id A: int = -1 if entailment_id == 0 else 0 A: Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]] A: int = np.exp(SCREAMING_SNAKE_CASE_ ) / np.exp(SCREAMING_SNAKE_CASE_ ).sum(-1 , keepdims=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels A: List[Any] = reshaped_outputs[..., self.entailment_id] A: Any = np.exp(SCREAMING_SNAKE_CASE_ ) / np.exp(SCREAMING_SNAKE_CASE_ ).sum(-1 , keepdims=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
334
'''simple docstring''' from collections import deque class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: '''simple docstring''' A: Union[str, Any] = process_name # process name A: List[str] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A: Dict = arrival_time A: Optional[Any] = burst_time # remaining burst time A: Any = 0 # total time of the process wait in ready queue A: Any = 0 # time from arrival time to completion time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None: '''simple docstring''' A: Dict = number_of_queues # time slice of queues that round robin algorithm applied A: int = time_slices # unfinished process is in this ready_queue A: Tuple = queue # current time A: int = current_time # finished process is in this sequence queue A: deque[Process] = deque() def _snake_case ( self : List[Any] ) -> list[str]: '''simple docstring''' A: str = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Optional[int] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Any = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: List[Any] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]: '''simple docstring''' return [q.burst_time for q in queue] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE_ ) != 0: A: Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A: Any = 0 # set the process's turnaround time because it is finished A: int = self.current_time - cp.arrival_time # set the completion time A: List[str] = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : 
int ) -> tuple[deque[Process], deque[Process]]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE_ ) ): A: Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A: Optional[Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A: int = 0 # set the finish time A: Union[str, Any] = self.current_time # update the process' turnaround time because it is finished A: Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _snake_case ( self : Optional[Any] ) -> deque[Process]: '''simple docstring''' for i in range(self.number_of_queues - 1 ): A , A: Optional[Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0) UpperCamelCase = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( f'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( f'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
334
1
'''simple docstring'''
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` can be placed at ``grid[row][column]`` without
    clashing in its row, column or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
334
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger() @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> int: '''simple docstring''' A: List[str] = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(SCREAMING_SNAKE_CASE_ ) def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(SCREAMING_SNAKE_CASE_ ) [x.remove() for x in self.handles] return self @property def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : nn.Module UpperCamelCase_ : int = 0 UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]: '''simple docstring''' A: Dict = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized A: Tuple = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) ) A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): raise Exception( f"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while""" f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}.""" ) for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase = True ) -> Any: print(F"""Converting {name}...""" ) with torch.no_grad(): A: Union[str, Any] = timm.create_model(__lowercase , pretrained=__lowercase ).eval() A: List[str] = ResNetForImageClassification(__lowercase ).eval() A: int = ModuleTransfer(src=__lowercase , dest=__lowercase ) A: List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(__lowercase ) assert torch.allclose(from_model(__lowercase ) , our_model(__lowercase ).logits ), "The model logits don't match the original one." 
A: str = F"""resnet{'-'.join(name.split('resnet' ) )}""" print(__lowercase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=__lowercase , ) # we can use the convnext one A: Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , ) print(F"""Pushed {checkpoint_name}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> List[Any]: A: Union[str, Any] = '''imagenet-1k-id2label.json''' A: Union[str, Any] = 1_0_0_0 A: Optional[int] = (1, num_labels) A: Dict = '''huggingface/label-files''' A: Any = num_labels A: Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Optional[int] = {int(__lowercase ): v for k, v in idalabel.items()} A: Optional[int] = idalabel A: List[str] = {v: k for k, v in idalabel.items()} A: str = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase ) A: Optional[Any] = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(__lowercase , names_to_config[model_name] , __lowercase , __lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__lowercase , __lowercase , __lowercase , __lowercase ) return config, expected_shape if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
334
1
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
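# A short usage sketch for the ``lucas_lehmer_test`` above: filtering prime
# exponents through the test recovers the exponents p <= 31 for which the
# Mersenne number 2**p - 1 is prime. The ``is_prime`` helper is hypothetical,
# added here only to keep the demo self-contained.
def is_prime(n: int) -> bool:
    # simple trial-division check, fine for the tiny range used below
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


mersenne_exponents = [p for p in range(2, 32) if is_prime(p) and lucas_lehmer_test(p)]
print(mersenne_exponents)  # expected: [2, 3, 5, 7, 13, 17, 19, 31]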
334
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str | Literal[False]: A: List[str] = list(__lowercase ) A: Optional[Any] = list(__lowercase ) A: int = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count += 1 A: Optional[Any] = '''_''' if count > 1: return False else: return "".join(__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase ) -> list[str]: A: Any = [] while True: A: Dict = ['''$'''] * len(__lowercase ) A: Union[str, Any] = [] for i in range(len(__lowercase ) ): for j in range(i + 1 , len(__lowercase ) ): A: Any = compare_string(binary[i] , binary[j] ) if k is False: A: Any = '''*''' A: List[Any] = '''*''' temp.append('''X''' ) for i in range(len(__lowercase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__lowercase ) == 0: return pi A: List[Any] = list(set(__lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]: A: Optional[int] = [] for minterm in minterms: A: Optional[int] = '''''' for _ in range(__lowercase ): A: List[Any] = str(minterm % 2 ) + string minterm //= 2 temp.append(__lowercase ) return temp def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> bool: A: Union[str, Any] = list(__lowercase ) A: Union[str, Any] = list(__lowercase ) A: Optional[int] = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[str]: A: List[Any] = [] A: Dict = [0] * len(__lowercase ) for i in range(len(chart[0] ) ): A: List[str] = 0 A: str = -1 for j in range(len(__lowercase ) ): if chart[j][i] == 1: count += 1 A: Any = j if count == 1: A: Any = 1 for i in range(len(__lowercase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__lowercase ) ): A: Optional[int] = 0 temp.append(prime_implicants[i] ) while True: A: Dict = 0 A: Optional[int] = -1 A: Dict = 0 for i in range(len(__lowercase ) ): A: str = chart[i].count(1 ) if count_n > max_n: A: Tuple = count_n A: Optional[Any] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__lowercase ) ): A: Any = 0 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[int]]: A: str = [[0 for x in range(len(__lowercase ) )] for x in range(len(__lowercase ) )] for i in range(len(__lowercase ) ): A: Tuple = prime_implicants[i].count('''_''' ) for j in range(len(__lowercase ) ): if is_for_table(prime_implicants[i] , binary[j] , __lowercase ): A: Optional[Any] = 1 return chart def SCREAMING_SNAKE_CASE( ) -> None: A: int = int(input('''Enter the no. of variables\n''' ) ) A: Optional[int] = [ float(__lowercase ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] A: List[str] = decimal_to_binary(__lowercase , __lowercase ) A: str = check(__lowercase ) print('''Prime Implicants are:''' ) print(__lowercase ) A: List[Any] = prime_implicant_chart(__lowercase , __lowercase ) A: Any = selection(__lowercase , __lowercase ) print('''Essential Prime Implicants are:''' ) print(__lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
334
1
'''simple docstring''' from collections import deque class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: '''simple docstring''' A: Union[str, Any] = process_name # process name A: List[str] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A: Dict = arrival_time A: Optional[Any] = burst_time # remaining burst time A: Any = 0 # total time of the process wait in ready queue A: Any = 0 # time from arrival time to completion time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None: '''simple docstring''' A: Dict = number_of_queues # time slice of queues that round robin algorithm applied A: int = time_slices # unfinished process is in this ready_queue A: Tuple = queue # current time A: int = current_time # finished process is in this sequence queue A: deque[Process] = deque() def _snake_case ( self : List[Any] ) -> list[str]: '''simple docstring''' A: str = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Optional[int] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Any = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: List[Any] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]: '''simple docstring''' return [q.burst_time for q in queue] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE_ ) != 0: A: Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A: Any = 0 # set the process's turnaround time because it is finished A: int = self.current_time - cp.arrival_time # set the completion time A: List[str] = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : 
int ) -> tuple[deque[Process], deque[Process]]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE_ ) ): A: Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A: Optional[Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A: int = 0 # set the finish time A: Union[str, Any] = self.current_time # update the process' turnaround time because it is finished A: Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _snake_case ( self : Optional[Any] ) -> deque[Process]: '''simple docstring''' for i in range(self.number_of_queues - 1 ): A , A: Optional[Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0) UpperCamelCase = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( f'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( f'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
334
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """Sort ``collection`` in place with selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
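# Quick sanity check for the ``selection_sort`` above (expected outputs shown
# inline); it complements the interactive ``__main__`` demo.
print(selection_sort([64, 25, 12, 22, 11]))  # [11, 12, 22, 25, 64]
print(selection_sort([-2, 0, -2]))           # [-2, -2, 0]
print(selection_sort([]))                    # []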
334
1
'''simple docstring'''
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
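# Minimal stand-alone sketch of what the test above exercises (CPU-only
# assumption): an optimizer wrapped by ``Accelerator.prepare`` should survive
# a pickle round trip with its hyper-parameters intact.
import pickle

import torch
from accelerate import Accelerator

model = torch.nn.Linear(10, 10)
optimizer = Accelerator().prepare(torch.optim.SGD(model.parameters(), lr=0.1))
restored = pickle.loads(pickle.dumps(optimizer))
assert restored.param_groups[0]["lr"] == 0.1  # settings survive the round trip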
334
'''simple docstring''' class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: Tuple = None A: Dict = None A: Optional[int] = graph self._normalize_graph(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: str = len(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = None def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict ) -> str: '''simple docstring''' if sources is int: A: Union[str, Any] = [sources] if sinks is int: A: Tuple = [sinks] if len(SCREAMING_SNAKE_CASE_ ) == 0 or len(SCREAMING_SNAKE_CASE_ ) == 0: return A: List[str] = sources[0] A: Optional[int] = sinks[0] # make fake vertex if there are more # than one source or sink if len(SCREAMING_SNAKE_CASE_ ) > 1 or len(SCREAMING_SNAKE_CASE_ ) > 1: A: Any = 0 for i in sources: max_input_flow += sum(self.graph[i] ) A: Dict = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: A: Optional[Any] = max_input_flow A: Optional[Any] = 0 A: str = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: A: Optional[Any] = max_input_flow A: str = size - 1 def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception('''You need to set maximum flow algorithm before.''' ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: '''simple docstring''' A: Optional[Any] = algorithm(self ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[Any]: '''simple docstring''' A: str = flow_network A: List[str] = flow_network.verticesCount A: Dict = flow_network.sourceIndex A: Any = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that A: str = flow_network.graph A: str = False def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' if not self.executed: self._algorithm() A: str = True def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) # use this to save your result A: Any = -1 def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' if not self.executed: raise Exception('''You should execute algorithm before using its result!''' ) return self.maximum_flow class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : Any ) -> Optional[int]: '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = [[0] * self.verticies_count for i in range(self.verticies_count )] A: Any = [0] * self.verticies_count A: Optional[Any] = [0] * self.verticies_count def _snake_case ( self : str ) -> Optional[Any]: '''simple docstring''' A: Any = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] 
+= bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule A: str = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list A: Dict = 0 while i < len(SCREAMING_SNAKE_CASE_ ): A: Any = vertices_list[i] A: str = self.heights[vertex_index] self.process_vertex(SCREAMING_SNAKE_CASE_ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE_ ) ) A: Tuple = 0 else: i += 1 A: Tuple = sum(self.preflow[self.source_index] ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.relabel(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> int: '''simple docstring''' A: Optional[int] = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> int: '''simple docstring''' A: Optional[Any] = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): A: List[Any] = self.heights[to_index] if min_height is not None: A: int = min_height + 1 if __name__ == "__main__": UpperCamelCase = [0] UpperCamelCase = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network UpperCamelCase = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate UpperCamelCase = flow_network.find_maximum_flow() print(f'maximum flow is {maximum_flow}')
334
1
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : str = GPTSanJapaneseTokenizer UpperCamelCase_ : Union[str, Any] = False UpperCamelCase_ : Dict = {"""do_clean_text""": False, """add_prefix_space""": False} def _snake_case ( self : Any ) -> str: '''simple docstring''' super().setUp() # fmt: off A: Optional[Any] = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>'''] # fmt: on A: Union[str, Any] = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀 A: Union[str, Any] = {'''unk_token''': '''<unk>'''} A: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) with open(self.emoji_file , '''w''' ) as emoji_writer: emoji_writer.write(json.dumps(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = '''こんにちは、世界。 \nこんばんは、㔺界。😀''' A: Union[str, Any] = '''こんにちは、世界。 \nこんばんは、世界。😀''' return input_text, output_text def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]: '''simple docstring''' A , A: int = self.get_input_output_texts(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) return text, ids def _snake_case ( self : int ) -> Optional[Any]: '''simple docstring''' pass # TODO add if relevant def _snake_case ( self : Optional[Any] ) -> Dict: '''simple docstring''' pass # TODO add if relevant def _snake_case ( self : List[str] ) -> str: '''simple docstring''' pass # TODO add if relevant def _snake_case ( self : int ) -> str: '''simple docstring''' A: Optional[Any] = self.get_tokenizer() # Testing tokenization A: Dict = '''こんにちは、世界。 こんばんは、㔺界。''' A: str = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。'''] A: str = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing conversion to ids without special tokens A: int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] A: Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing conversion to ids with special tokens A: int = tokens + [tokenizer.unk_token] A: 
int = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] A: List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Tuple = self.get_tokenizer() # Testing tokenization A: List[str] = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。''' A: Tuple = '''こんにちは、、、、世界。こんばんは、、、、世界。''' A: str = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : str ) -> List[str]: '''simple docstring''' A: List[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) # Testing tokenization A: Optional[int] = '''こんにちは、世界。''' A: str = '''こんばんは、㔺界。😀''' A: str = '''こんにちは、世界。こんばんは、世界。😀''' A: Optional[Any] = tokenizer.encode(prefix_text + input_text ) A: Dict = tokenizer.encode('''''' , prefix_text=prefix_text + input_text ) A: str = tokenizer.encode(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : List[Any] ) -> Dict: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) # Testing tokenization A: Tuple = '''こんにちは、世界。''' A: List[Any] = '''こんばんは、㔺界。😀''' A: int = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2 A: str = len(tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) - 2 A: List[Any] = [1] + [0] * (len_prefix + len_text + 1) A: Any = [1] * (len_prefix + len_text + 1) + [0] A: Optional[Any] = [1] + [1] * (len_prefix) + [0] * (len_text + 1) A: Tuple = tokenizer(prefix_text + input_text ).token_type_ids A: int = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , prefix_text=SCREAMING_SNAKE_CASE_ ).token_type_ids self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' A: Tuple = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) A: Dict = tokenizer.encode('''あンいワ''' ) A: str = tokenizer.encode('''''' , prefix_text='''あンいワ''' ) A: Dict = tokenizer.encode('''いワ''' , prefix_text='''あン''' ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE_ ) , tokenizer.decode(SCREAMING_SNAKE_CASE_ ) ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def _snake_case ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' A: Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) A: List[Any] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']] A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) A: str = 
tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) # fmt: off A: Optional[Any] = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] A: Optional[int] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] A: List[str] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token.token_type_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token.attention_mask , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token_a.input_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token_a.token_type_ids , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(x_token_a.attention_mask , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any ) -> Dict: '''simple docstring''' pass def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' pass
334
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ["""input_features""", """attention_mask"""] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_60_00 , SCREAMING_SNAKE_CASE_ : int=80 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]: '''simple docstring''' super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = num_mel_bins A: str = do_ceptral_normalize A: int = normalize_means A: List[Any] = normalize_vars A: Any = True def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , ) -> np.ndarray: '''simple docstring''' A: Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers A: Optional[int] = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) A: List[Any] = ta_kaldi.fbank(SCREAMING_SNAKE_CASE_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : float = 0.0 , ) -> np.ndarray: '''simple docstring''' if normalize_means: A: str = x[:input_length].mean(axis=0 ) A: Dict = np.subtract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if normalize_vars: A: Tuple = x[:input_length].std(axis=0 ) A: List[Any] = np.divide(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if input_length < x.shape[0]: A: Optional[int] = padding_value # make sure array is in float32 A: Optional[Any] = x.astype(np.floataa ) return x def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[np.ndarray] , SCREAMING_SNAKE_CASE_ : Optional[np.ndarray] = None ) -> List[np.ndarray]: '''simple docstring''' A: int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ] def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" 
{self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) A: Any = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) A: Optional[Any] = is_batched_numpy or ( isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: A: Optional[int] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ): A: int = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): A: Any = raw_speech.astype(np.floataa ) # always return batch if not is_batched: A: Union[str, Any] = [raw_speech] # extract fbank features A: str = [self._extract_fbank_features(SCREAMING_SNAKE_CASE_ ) for waveform in raw_speech] # convert into correct format for padding A: int = BatchFeature({'''input_features''': features} ) A: int = self.pad( SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) # make sure list is in array format A: List[str] = padded_inputs.get('''input_features''' ) if isinstance(input_features[0] , SCREAMING_SNAKE_CASE_ ): A: Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in input_features] A: List[Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: A: Dict = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: A: Dict = ( np.array(SCREAMING_SNAKE_CASE_ , dtype=np.intaa ) if self._get_padding_strategies(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD else None ) A: List[Any] = self.normalize( padded_inputs['''input_features'''] , attention_mask=SCREAMING_SNAKE_CASE_ ) if return_tensors is not None: A: Dict = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ ) return padded_inputs
334
1
'''simple docstring''' import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str=1_00 , SCREAMING_SNAKE_CASE_ : str=13 , SCREAMING_SNAKE_CASE_ : List[str]=30 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : List[Any]=4 , SCREAMING_SNAKE_CASE_ : str=37 , SCREAMING_SNAKE_CASE_ : List[Any]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=10 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : List[Any]=3 , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : List[str]=[0, 1, 2, 3] , ) -> Union[str, Any]: '''simple docstring''' A: Tuple = parent A: Union[str, Any] = 1_00 A: Union[str, Any] = batch_size A: Optional[int] = image_size A: Union[str, Any] = patch_size A: List[str] = num_channels A: str = is_training A: Union[str, Any] = use_labels A: List[Any] = hidden_size A: int = num_hidden_layers A: Union[str, Any] = num_attention_heads A: Any = intermediate_size A: str = hidden_act A: int = hidden_dropout_prob A: List[str] = attention_probs_dropout_prob A: str = type_sequence_label_size A: Union[str, Any] = initializer_range A: List[str] = scope A: Union[str, Any] = out_indices A: int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A: int = (image_size // patch_size) ** 2 A: List[Any] = num_patches + 1 def _snake_case ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A: str = None A: Tuple = None if self.use_labels: A: int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A: Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A: Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def _snake_case ( self : Dict ) -> Any: '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict ) -> List[Any]: '''simple docstring''' A: List[Any] = BeitModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: List[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict: '''simple docstring''' A: Optional[int] = BeitForMaskedImageModeling(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Optional[int] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str ) -> Any: '''simple docstring''' A: List[str] = self.type_sequence_label_size A: Optional[int] = BeitForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: int = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A: Optional[Any] = 1 A: Any = BeitForImageClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A: List[str] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ) -> Any: '''simple docstring''' A: Tuple = self.num_labels A: Union[str, Any] = BeitForSemanticSegmentation(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() A: Optional[Any] = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) A: List[Any] = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _snake_case ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' A: Tuple = self.prepare_config_and_inputs() A , A , A , A: Tuple = config_and_inputs A: List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : str = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) UpperCamelCase_ : List[str] = ( { """feature-extraction""": BeitModel, """image-classification""": BeitForImageClassification, """image-segmentation""": BeitForSemanticSegmentation, } if is_torch_available() else {} ) UpperCamelCase_ 
: List[Any] = False UpperCamelCase_ : int = False UpperCamelCase_ : Optional[Any] = False def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' A: List[Any] = BeitModelTester(self ) A: List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''BEiT does not use inputs_embeds''' ) def _snake_case ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _snake_case ( self : Union[str, Any] ) -> str: '''simple docstring''' pass def _snake_case ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' A , A: Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A: str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A , A: str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A: Any = model_class(SCREAMING_SNAKE_CASE_ ) A: str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A: str = [*signature.parameters.keys()] A: Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Any: '''simple docstring''' A: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : str ) -> List[str]: '''simple docstring''' A: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> Optional[int]: '''simple docstring''' A: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] ) -> List[str]: '''simple docstring''' if not self.model_tester.is_training: return A , A: Any = self.model_tester.prepare_config_and_inputs_for_common() A: List[Any] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(SCREAMING_SNAKE_CASE_ ), BeitForMaskedImageModeling]: continue A: Tuple = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.train() A: int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) A: int = model(**SCREAMING_SNAKE_CASE_ ).loss loss.backward() def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A , A: int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A: List[Any] = False A: int = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in 
                [*get_values(SCREAMING_SNAKE_CASE_ ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            A: Tuple = model_class(SCREAMING_SNAKE_CASE_ )
            model.gradient_checkpointing_enable()
            model.to(SCREAMING_SNAKE_CASE_ )
            model.train()
            A: Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
            A: Tuple = model(**SCREAMING_SNAKE_CASE_ ).loss
            loss.backward()

    def _snake_case ( self : int ) -> Union[str, Any]:
        '''simple docstring'''
        A , A: Any = self.model_tester.prepare_config_and_inputs_for_common()
        A: Dict = _config_zero_init(SCREAMING_SNAKE_CASE_ )
        for model_class in self.all_model_classes:
            A: List[Any] = model_class(config=SCREAMING_SNAKE_CASE_ )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @slow
    def _snake_case ( self : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A: str = BeitModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )


def SCREAMING_SNAKE_CASE( ) -> Any:
    A: Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def _snake_case ( self : Any ) -> List[str]:
        '''simple docstring'''
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None

    @slow
    def _snake_case ( self : Dict ) -> Any:
        '''simple docstring'''
        A: Union[str, Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(SCREAMING_SNAKE_CASE_ )
        A: int = self.default_image_processor
        A: Union[str, Any] = prepare_img()
        A: int = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values.to(SCREAMING_SNAKE_CASE_ )
        # prepare bool_masked_pos
        A: Optional[Any] = torch.ones((1, 1_96) , dtype=torch.bool ).to(SCREAMING_SNAKE_CASE_ )
        # forward pass
        with torch.no_grad():
            A: Optional[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_ , bool_masked_pos=SCREAMING_SNAKE_CASE_ )
        A: Optional[Any] = outputs.logits
        # verify the logits
        A: List[str] = torch.Size((1, 1_96, 81_92) )
        self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
        A: List[str] = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(SCREAMING_SNAKE_CASE_ )
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-2 ) )

    @slow
    def _snake_case ( self : Optional[Any] ) -> int:
        '''simple docstring'''
        A: List[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(SCREAMING_SNAKE_CASE_ )
        A: str = self.default_image_processor
        A: int = prepare_img()
        A: Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
        # forward pass
        with torch.no_grad():
            A: List[Any] = model(**SCREAMING_SNAKE_CASE_ )
        A: Union[str, Any] = outputs.logits
        # verify the logits
        A: int = torch.Size((1, 10_00) )
        self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
        A: int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(SCREAMING_SNAKE_CASE_ )
        self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
        A: int = 2_81
        self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE_ )

    @slow
    def _snake_case ( self : Dict ) -> Any:
        '''simple docstring'''
        A: Union[str, Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
            SCREAMING_SNAKE_CASE_ )
        A: int = self.default_image_processor
        A: int = prepare_img()
        A: Any = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
        # forward pass
        with torch.no_grad():
            A: str = model(**SCREAMING_SNAKE_CASE_ )
        A: List[Any] = outputs.logits
        # verify the logits
        A: List[str] = torch.Size((1, 2_18_41) )
        self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
        A: List[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(SCREAMING_SNAKE_CASE_ )
        self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
        A: List[str] = 23_96
        self.assertEqual(logits.argmax(-1 ).item() , SCREAMING_SNAKE_CASE_ )

    @slow
    def _snake_case ( self : Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        A: Dict = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
        A: Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
        A: List[str] = BeitImageProcessor(do_resize=SCREAMING_SNAKE_CASE_ , size=6_40 , do_center_crop=SCREAMING_SNAKE_CASE_ )
        A: int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
        A: Union[str, Any] = Image.open(ds[0]['''file'''] )
        A: Tuple = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
        # forward pass
        with torch.no_grad():
            A: List[str] = model(**SCREAMING_SNAKE_CASE_ )
        A: Tuple = outputs.logits
        # verify the logits
        A: Union[str, Any] = torch.Size((1, 1_50, 1_60, 1_60) )
        self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ )
        A: int = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
        if is_pillow_less_than_a:
            A: int = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ] , device=SCREAMING_SNAKE_CASE_ , )
        else:
            A: Any = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ] , device=SCREAMING_SNAKE_CASE_ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )

    @slow
    def _snake_case ( self : Tuple ) -> str:
        '''simple docstring'''
        A: Union[str, Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
        A: Tuple = model.to(SCREAMING_SNAKE_CASE_ )
        A: Tuple = BeitImageProcessor(do_resize=SCREAMING_SNAKE_CASE_ , size=6_40 , do_center_crop=SCREAMING_SNAKE_CASE_ )
        A: int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
        A: Optional[Any] = Image.open(ds[0]['''file'''] )
        A: Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
        # forward pass
        with torch.no_grad():
            A: int = model(**SCREAMING_SNAKE_CASE_ )
        A: Union[str, Any] = outputs.logits.detach().cpu()
        A: Any = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ , target_sizes=[(5_00, 3_00)] )
        A: List[Any] = torch.Size((5_00, 3_00) )
        self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
        A: List[Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
        A: List[str] = torch.Size((1_60, 1_60) )
        self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
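
# Illustrative sketch (not part of the test file above): the end-to-end inference
# pattern these integration tests exercise. The checkpoint name and fixture path
# are taken from the tests; everything else is standard transformers usage.
import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

image_processor_demo = BeitImageProcessor.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''')
model_demo = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''')
image_demo = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
inputs_demo = image_processor_demo(images=image_demo, return_tensors='''pt''')
with torch.no_grad():
    outputs_demo = model_demo(**inputs_demo)
# post-processing resizes the per-pixel argmax map to each requested target size
segmentation_demo = image_processor_demo.post_process_semantic_segmentation(outputs=outputs_demo, target_sizes=[(5_00, 3_00)])
print(segmentation_demo[0].shape)  # torch.Size([500, 300])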
334
'''simple docstring'''
import json
import os
import unittest

from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''

    UpperCamelCase_ : Optional[Any] = DebertaTokenizer
    UpperCamelCase_ : List[str] = True
    UpperCamelCase_ : int = DebertaTokenizerFast

    def _snake_case ( self : Optional[int] ) -> Dict:
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        A: Optional[int] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''[UNK]''',
        ]
        A: int = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        A: Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        A: Union[str, Any] = {'''unk_token''': '''[UNK]'''}
        A: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        A: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )

    def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Tuple:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]:
        '''simple docstring'''
        A: Optional[int] = '''lower newer'''
        A: str = '''lower newer'''
        return input_text, output_text

    def _snake_case ( self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        A: str = self.get_tokenizer()
        A: Any = '''lower newer'''
        A: Dict = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        A: int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        A: List[Any] = tokens + [tokenizer.unk_token]
        A: int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : List[Any] ) -> Any:
        '''simple docstring'''
        A: str = self.get_tokenizer()
        A: List[str] = tokenizer('''Hello''' , '''World''' )
        A: Union[str, Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , SCREAMING_SNAKE_CASE_ )

    @slow
    def _snake_case ( self : Tuple ) -> Optional[int]:
        '''simple docstring'''
        A: Union[str, Any] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        A: Any = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        A: Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        A: Dict = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
        A: Dict = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ )
        A: List[Any] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
        A: int = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    @slow
    def _snake_case ( self : Tuple ) -> Dict:
        '''simple docstring'''
        A: int = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            A: List[Any] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            A: Dict = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            A: Dict = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
            A: Any = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) for seq in encoding['''input_ids''']]
            # fmt: off
            A: Any = {
                '''input_ids''': [
                    [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
                ],
                '''token_type_ids''': [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                '''attention_mask''': [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            A: Optional[int] = [
                '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
                '''ALBERT incorporates two parameter reduction techniques''',
                '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
                ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
                ''' vocabulary embedding.''',
            ]
            self.assertDictEqual(encoding.data , SCREAMING_SNAKE_CASE_ )
            for expected, decoded in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
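
# Illustrative sketch (standalone, not from the test file): why '''lower''' tokenizes
# to ['l', 'o', 'w', 'er'] under the toy vocab/merges above — without the leading
# '\u0120' (byte-level space marker) only the ('e', 'r') merge applies. Real BPE
# repeatedly merges the best-ranked pair; one pass per rule suffices for this toy set.
def apply_merges_demo(symbols, merges):
    for left, right in merges:  # merges are listed in priority order
        i, out = 0, []
        while i < len(symbols):
            if i + 1 < len(symbols) and symbols[i] == left and symbols[i + 1] == right:
                out.append(left + right)
                i += 2
            else:
                out.append(symbols[i])
                i += 1
        symbols = out
    return symbols

merges_demo = [('\u0120', 'l'), ('\u0120l', 'o'), ('\u0120lo', 'w'), ('e', 'r')]
assert apply_merges_demo(list('lower'), merges_demo) == ['l', 'o', 'w', 'er']
assert apply_merges_demo(['\u0120'] + list('lower'), merges_demo) == ['\u0120low', 'er']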
334
1
'''simple docstring'''
import faiss  # noqa: F401  # Here to have a nice missing dependency error message early on
import numpy  # noqa: F401  # Here to have a nice missing dependency error message early on
import requests  # noqa: F401  # Here to have a nice missing dependency error message early on
import sklearn  # noqa: F401  # Here to have a nice missing dependency error message early on
import tqdm  # noqa: F401  # Here to have a nice missing dependency error message early on
from mauve import compute_mauve  # From: mauve-text

import datasets


UpperCamelCase = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
  title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
  author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
  booktitle = {NeurIPS},
  year = {2021}
}
'''

UpperCamelCase = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.

MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.

For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).

This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''

UpperCamelCase = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces.
Optional Args:
    num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
    kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
    kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
    kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
    max_text_length: maximum number of tokens to consider. Default 1024
    divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
    mauve_scaling_factor: "c" from the paper. Default 5.
    verbose: If True (default), print running time updates
    seed: random seed to initialize k-means cluster assignments.
Returns:
    mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
    frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
    divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
    p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
    q_hist: same as above, but with q_text.
Examples:

    >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
    >>> import datasets
    >>> mauve = datasets.load_metric(\'mauve\')
    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
    >>> print(out.mauve) # doctest: +SKIP
    1.0
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    '''simple docstring'''

    def _snake_case ( self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Value('''string''' , id='''sequence''' ),
                } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
                '''https://arxiv.org/abs/2102.01454''',
                '''https://github.com/krishnap25/mauve''',
            ] , )

    def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : List[Any]="auto" , SCREAMING_SNAKE_CASE_ : str=-1 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.9 , SCREAMING_SNAKE_CASE_ : Dict=5 , SCREAMING_SNAKE_CASE_ : Optional[int]=5_00 , SCREAMING_SNAKE_CASE_ : str="gpt2-large" , SCREAMING_SNAKE_CASE_ : List[str]=-1 , SCREAMING_SNAKE_CASE_ : List[str]=10_24 , SCREAMING_SNAKE_CASE_ : str=25 , SCREAMING_SNAKE_CASE_ : List[Any]=5 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : str=25 , ) -> List[Any]:
        '''simple docstring'''
        A: str = compute_mauve(
            p_text=SCREAMING_SNAKE_CASE_ , q_text=SCREAMING_SNAKE_CASE_ , p_features=SCREAMING_SNAKE_CASE_ , q_features=SCREAMING_SNAKE_CASE_ , p_tokens=SCREAMING_SNAKE_CASE_ , q_tokens=SCREAMING_SNAKE_CASE_ , num_buckets=SCREAMING_SNAKE_CASE_ , pca_max_data=SCREAMING_SNAKE_CASE_ , kmeans_explained_var=SCREAMING_SNAKE_CASE_ , kmeans_num_redo=SCREAMING_SNAKE_CASE_ , kmeans_max_iter=SCREAMING_SNAKE_CASE_ , featurize_model_name=SCREAMING_SNAKE_CASE_ , device_id=SCREAMING_SNAKE_CASE_ , max_text_length=SCREAMING_SNAKE_CASE_ , divergence_curve_discretization_size=SCREAMING_SNAKE_CASE_ , mauve_scaling_factor=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , seed=SCREAMING_SNAKE_CASE_ , )
        return out
334
'''simple docstring'''
import requests

UpperCamelCase = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''


def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
    # fetching a list of articles in json format
    A: Tuple = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
        print(F"""{i}.) {article['title']}""" )


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
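
# Illustrative hardening sketch (assumed additions, not in the snippet above):
# the same fetch with a timeout and an HTTP status check before parsing JSON.
def fetch_bbc_news_safe(bbc_news_api_key: str) -> None:
    response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
    response.raise_for_status()  # fail fast on 4xx/5xx instead of on a missing '''articles''' key
    for i, article in enumerate(response.json()['''articles'''], 1):
        print(F"""{i}.) {article['title']}""")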
334
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
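
# Illustrative sketch of the lazy-import idea behind `_LazyModule`, using the
# PEP 562 module-level `__getattr__` hook as it would appear in a package
# `__init__` (simplified; this is not the transformers implementation).
import importlib

_LAZY_ATTRS = {'''TimesformerConfig''': '''.configuration_timesformer'''}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        # the submodule is only imported the first time the attribute is touched
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f'''module {__name__!r} has no attribute {name!r}''')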
334
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    UpperCamelCase = None

UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}

UpperCamelCase = {
    '''vocab_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
    },
    '''tokenizer_file''': {
        '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
    },
}

UpperCamelCase = {
    '''camembert-base''': 512,
}

UpperCamelCase = '''▁'''


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
    UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : str = ["""input_ids""", """attention_mask"""]
    UpperCamelCase_ : int = CamembertTokenizer

    def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : int="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE_ : str="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : Any , ) -> Any:
        '''simple docstring'''
        A: Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
        super().__init__(
            SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        A: Any = vocab_file
        A: Any = False if not self.vocab_file else True

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A: List[str] = [self.cls_token_id]
        A: List[str] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        A: List[str] = [self.sep_token_id]
        A: Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        A: Dict = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
        return (out_vocab_file,)
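
# Illustrative sketch (toy ids, not CamemBERT's real vocabulary): the layouts
# produced by the two methods above are <s> A </s> for one sequence and
# <s> A </s></s> B </s> for a pair — note the doubled separator.
CLS, SEP = 0, 2  # hypothetical ids standing in for <s> and </s>
ids_a, ids_b = [10, 11], [20]
assert [CLS] + ids_a + [SEP] == [0, 10, 11, 2]
assert [CLS] + ids_a + [SEP] + [SEP] + ids_b + [SEP] == [0, 10, 11, 2, 2, 20, 2]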
334
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {
    '''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    UpperCamelCase_ : int = """pegasus"""
    UpperCamelCase_ : Tuple = ["""past_key_values"""]
    UpperCamelCase_ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int]=5_02_65 , SCREAMING_SNAKE_CASE_ : str=10_24 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=12 , SCREAMING_SNAKE_CASE_ : Optional[int]=40_96 , SCREAMING_SNAKE_CASE_ : str=16 , SCREAMING_SNAKE_CASE_ : Dict=12 , SCREAMING_SNAKE_CASE_ : List[str]=40_96 , SCREAMING_SNAKE_CASE_ : Optional[int]=16 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Any="gelu" , SCREAMING_SNAKE_CASE_ : Optional[int]=10_24 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : Any=0.02 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Any=0 , SCREAMING_SNAKE_CASE_ : List[Any]=1 , SCREAMING_SNAKE_CASE_ : int=1 , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> Tuple:
        '''simple docstring'''
        A: int = vocab_size
        A: Dict = max_position_embeddings
        A: List[str] = d_model
        A: Tuple = encoder_ffn_dim
        A: Any = encoder_layers
        A: List[str] = encoder_attention_heads
        A: str = decoder_ffn_dim
        A: List[str] = decoder_layers
        A: Union[str, Any] = decoder_attention_heads
        A: Tuple = dropout
        A: Optional[Any] = attention_dropout
        A: Dict = activation_dropout
        A: List[str] = activation_function
        A: List[Any] = init_std
        A: List[Any] = encoder_layerdrop
        A: Optional[int] = decoder_layerdrop
        A: str = use_cache
        A: int = encoder_layers
        A: Optional[int] = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , forced_eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )

    @property
    def _snake_case ( self : Any ) -> int:
        '''simple docstring'''
        return self.encoder_attention_heads

    @property
    def _snake_case ( self : str ) -> int:
        '''simple docstring'''
        return self.d_model
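
# Illustrative sketch: the attribute map above lets the generic names resolve to
# the PEGASUS-specific fields (PretrainedConfig applies the aliasing), so both
# spellings read the same default values declared in `__init__`.
config_demo = lowerCAmelCase_()  # i.e. PegasusConfig() under its original name
assert config_demo.num_attention_heads == config_demo.encoder_attention_heads == 16
assert config_demo.hidden_size == config_demo.d_model == 10_24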
334
'''simple docstring'''
import os
from distutils.util import strtobool


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[Any]:
    for e in env_keys:
        A: Dict = int(os.environ.get(__lowercase , -1 ) )
        if val >= 0:
            return val
    return default


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False ) -> List[str]:
    A: str = os.environ.get(__lowercase , str(__lowercase ) )
    return strtobool(__lowercase ) == 1  # As its name indicates `strtobool` actually returns an int...


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase="no" ) -> str:
    A: Optional[int] = os.environ.get(__lowercase , str(__lowercase ) )
    return value
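
# Illustrative usage sketch (standalone, with an explicit name — the obfuscated
# defs above all share one name and shadow each other): the boolean helper turns
# y/yes/t/true/on/1 (case-insensitively) into True and their counterparts into False.
import os
from distutils.util import strtobool

def parse_flag_from_env_demo(key, default=False):
    return strtobool(os.environ.get(key, str(default))) == 1

os.environ['''MY_DEBUG_FLAG'''] = '''yes'''  # hypothetical variable name
assert parse_flag_from_env_demo('''MY_DEBUG_FLAG''') is True
assert parse_flag_from_env_demo('''MY_MISSING_FLAG''') is False  # falls back to the default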
334
1
'''simple docstring'''
from __future__ import annotations


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> float:
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1
    )


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , ) -> float:
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        __lowercase , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
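
# Worked example (standalone; the three defs above share one obfuscated name, so
# the formulas are restated inline): 1000 units at a 0.05% daily rate over 30 days
# accrues 1000 * 0.0005 * 30 = 15.0 in simple interest, while a 3.65% nominal APR
# compounded daily for one year yields 1000 * ((1 + 0.0365 / 365) ** 365 - 1), about 37.17.
principal_demo, daily_rate_demo, days_demo = 1000, 0.0005, 30
assert round(principal_demo * daily_rate_demo * days_demo, 2) == 15.0
apr_demo, years_demo = 0.0365, 1
compounded_demo = principal_demo * ((1 + apr_demo / 3_6_5) ** (years_demo * 3_6_5) - 1)
assert round(compounded_demo, 2) == 37.17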
334
'''simple docstring'''
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)


# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th

logging.set_verbosity_info()
UpperCamelCase = logging.get_logger('''transformers.models.encodec''')

UpperCamelCase = {
    '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
    '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
    '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
    '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
UpperCamelCase = {
    '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
    '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
    '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
    '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
    '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
    '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
    '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
    '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
    '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
    '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
    '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
    '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
    '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
    '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
    '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
    '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
    '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
    '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
    '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
UpperCamelCase = {
    '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
    '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
    '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
    '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
    '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
    '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
    '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
    '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
    '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
    '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
    '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
    '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
    '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
    '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
    '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
    '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
    '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
    '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
UpperCamelCase = {
    '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
    '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
    '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
    '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
    '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
    '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
    '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
    '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
    '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
    '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
    '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
    '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
    '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
    '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
    '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
    '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
    '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
    '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
    '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
UpperCamelCase = {
    '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
    '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
    '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
    '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
    '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
    '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
    '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
    '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
    '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
    '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
    '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
    '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
    '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
    '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
    '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
    '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
    '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
    '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
UpperCamelCase = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
UpperCamelCase = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
UpperCamelCase = []
UpperCamelCase = []


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict:
    for attribute in key.split('''.''' ):
        A: Union[str, Any] = getattr(__lowercase , __lowercase )
    if weight_type is not None:
        A: Tuple = getattr(__lowercase , __lowercase ).shape
    else:
        A: str = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        A: Dict = value
    elif weight_type == "weight_g":
        A: Tuple = value
    elif weight_type == "weight_v":
        A: Any = value
    elif weight_type == "bias":
        A: str = value
    elif weight_type == "running_mean":
        A: List[Any] = value
    elif weight_type == "running_var":
        A: Dict = value
    elif weight_type == "num_batches_tracked":
        A: List[str] = value
    elif weight_type == "weight_ih_l0":
        A: Dict = value
    elif weight_type == "weight_hh_l0":
        A: Optional[int] = value
    elif weight_type == "bias_ih_l0":
        A: List[Any] = value
    elif weight_type == "bias_hh_l0":
        A: str = value
    elif weight_type == "weight_ih_l1":
        A: Optional[int] = value
    elif weight_type == "weight_hh_l1":
        A: int = value
    elif weight_type == "bias_ih_l1":
        A: Optional[Any] = value
    elif weight_type == "bias_hh_l1":
        A: str = value
    else:
        A: Optional[int] = value
    logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            A , A: Any = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple:
    A: Any = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        A: List[str] = MAPPING_24K
    elif model_name == "encodec_48khz":
        A: List[Any] = MAPPING_48K
    else:
        raise ValueError(F"""Unsupported model: {model_name}""" )
    for name, value in orig_dict.items():
        if should_ignore(__lowercase , __lowercase ):
            logger.info(F"""{name} was ignored""" )
            continue
        A: Optional[int] = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                A , A: Optional[int] = key.split('''.*.''' )
                if prefix in name and suffix in name:
                    A: str = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
                    continue
                A: Optional[Any] = True
                if "*" in mapped_key:
                    A: Any = name.split(__lowercase )[0].split('''.''' )[-2]
                    A: Tuple = mapped_key.replace('''*''' , __lowercase )
                if "weight_g" in name:
                    A: str = '''weight_g'''
                elif "weight_v" in name:
                    A: List[Any] = '''weight_v'''
                elif "weight_ih_l0" in name:
                    A: Dict = '''weight_ih_l0'''
                elif "weight_hh_l0" in name:
                    A: int = '''weight_hh_l0'''
                elif "bias_ih_l0" in name:
                    A: Union[str, Any] = '''bias_ih_l0'''
                elif "bias_hh_l0" in name:
                    A: Tuple = '''bias_hh_l0'''
                elif "weight_ih_l1" in name:
                    A: int = '''weight_ih_l1'''
                elif "weight_hh_l1" in name:
                    A: Optional[Any] = '''weight_hh_l1'''
                elif "bias_ih_l1" in name:
                    A: Dict = '''bias_ih_l1'''
                elif "bias_hh_l1" in name:
                    A: str = '''bias_hh_l1'''
                elif "bias" in name:
                    A: Union[str, Any] = '''bias'''
                elif "weight" in name:
                    A: Dict = '''weight'''
                elif "running_mean" in name:
                    A: Tuple = '''running_mean'''
                elif "running_var" in name:
                    A: Any = '''running_var'''
                elif "num_batches_tracked" in name:
                    A: str = '''num_batches_tracked'''
                else:
                    A: Tuple = None
                set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
            continue
        if not is_used:
            unused_weights.append(__lowercase )
    logger.warning(F"""Unused weights: {unused_weights}""" )


@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ) -> Dict:
    if config_path is not None:
        A: Tuple = EncodecConfig.from_pretrained(__lowercase )
    else:
        A: Union[str, Any] = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        A: Union[str, Any] = [8, 5, 4, 4]
        A: Dict = [2.2]
        A: List[Any] = 6_4
        A: Optional[Any] = 3_2_0_0_0
        A: List[Any] = 2_0_4_8
        A: Optional[Any] = False
        A: int = False
        A: Union[str, Any] = False
    elif model_name == "encodec_48khz":
        A: Optional[int] = [8, 5, 4, 2]
        A: List[Any] = [3.0, 6.0, 1_2.0, 2_4.0]
        A: List[Any] = 4_8_0_0_0
        A: int = 2
        A: List[Any] = False
        A: Any = '''time_group_norm'''
        A: Optional[Any] = True
        A: Any = 1.0
        A: Any = 0.0_1
    else:
        raise ValueError(F"""Unknown model name: {model_name}""" )
    A: str = EncodecModel(__lowercase )
    A: Optional[Any] = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(__lowercase )
    A: Union[str, Any] = torch.load(__lowercase )
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        A: Optional[int] = original_checkpoint['''best_state''']
    recursively_load_weights(__lowercase , __lowercase , __lowercase )
    model.save_pretrained(__lowercase )
    if repo_id:
        print('''Pushing to the hub...''' )
        feature_extractor.push_to_hub(__lowercase )
        model.push_to_hub(__lowercase )


if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument(
        '''--model''',
        default='''encodec_24khz''',
        type=str,
        help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
    )
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )

    UpperCamelCase = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
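
# Illustrative sketch (standalone toy, not the script itself): how a '*' wildcard
# in a mapping key stands in for a layer index that is recovered from the original
# parameter name and substituted into the new key.
def rename_with_wildcard_demo(name, key, mapped_key):
    prefix, suffix = key.split('''.*.''')
    middle = name[len(prefix) + 1 : len(name) - len(suffix) - 1]  # the layer index
    return mapped_key.replace('''*''', middle)

renamed_demo = rename_with_wildcard_demo(
    '''quantizer.vq.layers.3._codebook.embed''',
    '''quantizer.vq.layers.*._codebook.embed''',
    '''quantizer.layers.*.codebook.embed''',
)
assert renamed_demo == '''quantizer.layers.3.codebook.embed'''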
334
1
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> list:
    A: List[Any] = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        A: Optional[Any] = True
        for i in range(0 , len(__lowercase ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                A , A: List[Any] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                A: List[Any] = False
        for i in range(1 , len(__lowercase ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                A , A: int = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                A: Any = False
    return input_list


if __name__ == "__main__":
    print('''Enter list to be sorted''')
    UpperCamelCase = [int(x) for x in input().split()]  # inputing elements of the list in one line
    UpperCamelCase = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
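
# Worked example (standalone restatement): each sweep does one pass over the
# even-indexed pairs and one over the odd-indexed pairs; swaps within a pass touch
# disjoint pairs, which is why this "brick sort" parallelizes well.
def odd_even_sort_demo(values):
    values, swapped = list(values), True
    while swapped:
        swapped = False
        for start in (0, 1):  # even-indexed pairs first, then odd-indexed pairs
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    swapped = True
    return values

assert odd_even_sort_demo([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]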
334
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


UpperCamelCase = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''VisionEncoderDecoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''TFVisionEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    def _snake_case ( self : Optional[Any] ) -> int:
        '''simple docstring'''
        A: int = SMALL_MODEL_IDENTIFIER
        A: List[str] = '''pt'''
        A: Optional[Any] = '''tf'''

    def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
        '''simple docstring'''
        A: Any = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]:
        '''simple docstring'''
        A: Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=SCREAMING_SNAKE_CASE_ )
        model_tf.save_pretrained(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        A: List[str] = '''mock_framework'''
        # Framework provided - return whatever the user provides
        A: Tuple = FeaturesManager.determine_framework(self.test_model , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
            A: List[str] = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
            A: Union[str, Any] = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Any ) -> str:
        '''simple docstring'''
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
            A: int = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
            self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
            A: Dict = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
            self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
                A: Optional[int] = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : str ) -> int:
        '''simple docstring'''
        A: Any = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
        with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ):
            A: Optional[int] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        A: Dict = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
        with patch('''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
            A: List[str] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
        # Both in environment -> use PyTorch
        A: Dict = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
        A: Any = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
        with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ), patch(
            '''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
            A: Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
        # Both not in environment -> raise error
        A: List[Any] = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
        A: Any = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
        with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ), patch(
            '''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
            with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
                A: Dict = FeaturesManager.determine_framework(self.test_model )
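
# Illustrative sketch (simplified, not the real FeaturesManager logic): local
# checkpoint detection can key off the framework-specific weight filenames that
# `save_pretrained` writes, which is what the tests above set up.
import os

def guess_framework_demo(checkpoint_dir):
    if os.path.isfile(os.path.join(checkpoint_dir, '''pytorch_model.bin''')):
        return '''pt'''
    if os.path.isfile(os.path.join(checkpoint_dir, '''tf_model.h5''')):
        return '''tf'''
    raise FileNotFoundError(f'''no PyTorch or TensorFlow weights found in {checkpoint_dir}''')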
334
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> list[list[float]]:
    A: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(__lowercase ):
            if len(__lowercase ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(__lowercase ) )
    return data_lists


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]:
    A: list[list[float]] = []
    for dlist, weight in zip(__lowercase , __lowercase ):
        A: List[str] = min(__lowercase )
        A: Union[str, Any] = max(__lowercase )
        A: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            A: List[str] = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(__lowercase )
        score_lists.append(__lowercase )
    return score_lists


def SCREAMING_SNAKE_CASE( __lowercase ) -> list[float]:
    A: list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(__lowercase ):
            A: str = final_scores[j] + ele
    return final_scores


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list[list[float]]:
    A: Any = get_data(__lowercase )
    A: str = calculate_each_score(__lowercase , __lowercase )
    A: int = generate_final_scores(__lowercase )
    # append scores to source data
    for i, ele in enumerate(__lowercase ):
        source_data[i].append(__lowercase )
    return source_data
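
# Worked example: with weight 1 a column is min-max normalized (higher is better);
# with weight 0 it is inverted (lower is better).
column_demo = [1.0, 2.0, 3.0]
lo_demo, hi_demo = min(column_demo), max(column_demo)
assert [(x - lo_demo) / (hi_demo - lo_demo) for x in column_demo] == [0.0, 0.5, 1.0]      # weight 1
assert [1 - (x - lo_demo) / (hi_demo - lo_demo) for x in column_demo] == [1.0, 0.5, 0.0]  # weight 0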
334
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule


UpperCamelCase = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
'''simple docstring'''
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

UpperCamelCase = {
    '''vocab_file''': {
        '''facebook/dpr-ctx_encoder-single-nq-base''': (
            '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
        ),
        '''facebook/dpr-ctx_encoder-multiset-base''': (
            '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/dpr-ctx_encoder-single-nq-base''': (
            '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
        ),
        '''facebook/dpr-ctx_encoder-multiset-base''': (
            '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
        ),
    },
}
UpperCamelCase = {
    '''vocab_file''': {
        '''facebook/dpr-question_encoder-single-nq-base''': (
            '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
        ),
        '''facebook/dpr-question_encoder-multiset-base''': (
            '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/dpr-question_encoder-single-nq-base''': (
            '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
        ),
        '''facebook/dpr-question_encoder-multiset-base''': (
            '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
        ),
    },
}
UpperCamelCase = {
    '''vocab_file''': {
        '''facebook/dpr-reader-single-nq-base''': (
            '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
        ),
        '''facebook/dpr-reader-multiset-base''': (
            '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''facebook/dpr-reader-single-nq-base''': (
            '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
        ),
        '''facebook/dpr-reader-multiset-base''': (
            '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
        ),
    },
}

UpperCamelCase = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': 512,
    '''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
UpperCamelCase = {
    '''facebook/dpr-question_encoder-single-nq-base''': 512,
    '''facebook/dpr-question_encoder-multiset-base''': 512,
}
UpperCamelCase = {
    '''facebook/dpr-reader-single-nq-base''': 512,
    '''facebook/dpr-reader-multiset-base''': 512,
}

UpperCamelCase = {
    '''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
UpperCamelCase = {
    '''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
UpperCamelCase = {
    '''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
    '''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
    UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCamelCase_ : Any = DPRContextEncoderTokenizer


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    UpperCamelCase_ : Dict = VOCAB_FILES_NAMES
    UpperCamelCase_ : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    UpperCamelCase_ : Optional[int] = DPRQuestionEncoderTokenizer


UpperCamelCase = collections.namedtuple(
    '''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)

UpperCamelCase = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])


UpperCamelCase = R'''
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or
              to the maximum acceptable input length for the model if that argument is not provided. This will
              truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences
              (or a batch of pairs) is provided.
            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only truncate
              the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only truncate
              the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `\'tf\'`: Return TensorFlow `tf.constant` objects.
            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.
            - `\'np\'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer\'s default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    '''


@add_start_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ :
    '''simple docstring'''

    def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str] = False , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        elif titles is None or texts is None:
            A: Union[str, Any] = titles if texts is None else texts
            return super().__call__(
                SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        A: Union[str, Any] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [titles]
        A: Optional[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [texts]
        A: str = len(SCREAMING_SNAKE_CASE_ )
        A: List[Any] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else [questions] * n_passages
        assert len(SCREAMING_SNAKE_CASE_ ) == len(
            SCREAMING_SNAKE_CASE_ ), f"""There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_ )} titles and {len(SCREAMING_SNAKE_CASE_ )} texts."""
        A: Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids''']
        A: Dict = super().__call__(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )['''input_ids''']
        A: str = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            ]
        }
        if return_attention_mask is not False:
            A: Union[str, Any] = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            A: Optional[Any] = attention_mask
        return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : BatchEncoding , SCREAMING_SNAKE_CASE_ : DPRReaderOutput , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : int = 64 , SCREAMING_SNAKE_CASE_ : int = 4 , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        A: Any = reader_input['''input_ids''']
        A , A , A: str = reader_output[:3]
        A: str = len(SCREAMING_SNAKE_CASE_ )
        A: Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_ ) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__ )
        A: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            A: List[str] = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            A: Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                A: Union[str, Any] = sequence_ids.index(self.pad_token_id )
            else:
                A: int = len(SCREAMING_SNAKE_CASE_ )
            A: Dict = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
                if len(SCREAMING_SNAKE_CASE_ ) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]

    def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ) -> List[DPRSpanPrediction]:
        '''simple docstring'''
        A: Union[str, Any] = []
        for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_ ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        A: Any = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[1] , reverse=SCREAMING_SNAKE_CASE_ )
        A: Dict = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            A: int = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(SCREAMING_SNAKE_CASE_ ) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(UpperCAmelCase_ )
class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ):
    '''simple docstring'''

    UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES
    UpperCamelCase_ : List[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : Dict = READER_PRETRAINED_INIT_CONFIGURATION
    UpperCamelCase_ : Any = ["""input_ids""", """attention_mask"""]
    UpperCamelCase_ : Optional[Any] = DPRReaderTokenizer
334
1
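The `_get_best_spans` logic in the record above enumerates every (start, end) candidate, ranks candidates by summed start/end logits, and skips any span nested inside an already-chosen one. A minimal standalone sketch of that selection loop, with plain Python floats standing in for model logits (the function name and inputs here are illustrative, not the library API):

def get_best_spans(start_logits, end_logits, max_answer_length=10, top_spans=3):
    # Score every candidate span (start, end) by summing its boundary logits.
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        # Skip candidates nested inside (or containing) an already-chosen span.
        if any(
            start_index <= prev_start <= prev_end <= end_index
            or prev_start <= start_index <= end_index <= prev_end
            for prev_start, prev_end in chosen
        ):
            continue
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen


print(get_best_spans([0.1, 2.0, 0.3], [0.2, 1.5, 0.9]))  # [(1, 1), (2, 2), (0, 0)]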
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = ["""image_processor""", """tokenizer"""] UpperCamelCase_ : Any = """ViTImageProcessor""" UpperCamelCase_ : Optional[int] = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : str ) -> Dict: '''simple docstring''' A: Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , SCREAMING_SNAKE_CASE_ , ) A: List[str] = kwargs.pop('''feature_extractor''' ) A: Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Any=None , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Union[str, Any]: '''simple docstring''' if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' ) if text is not None: A: Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if visual_prompt is not None: A: List[str] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if images is not None: A: str = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if visual_prompt is not None and images is not None: A: Any = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: A: List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: A: Union[str, Any] = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , *SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , *SCREAMING_SNAKE_CASE_ : List[Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def _snake_case ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. 
Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor_class @property def _snake_case ( self : str ) -> Dict: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor
334
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCamelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''GPTSw3Tokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
334
1
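The processor in the record above is mostly input routing: text goes to the tokenizer, images and visual prompts go to the image processor, and the pieces are merged into a single encoding. A stripped-down sketch of that routing pattern, assuming toy stand-ins for the tokenizer and image processor (all names here are hypothetical):

def toy_process(text=None, visual_prompt=None, images=None, tokenizer=None, image_processor=None):
    if text is None and visual_prompt is None and images is None:
        raise ValueError("You have to specify either text, visual prompt or images.")
    if text is not None and visual_prompt is not None:
        raise ValueError("Specify exactly one type of prompt, either text or visual.")
    encoding = {}
    if text is not None:
        encoding.update(tokenizer(text))  # e.g. input_ids, attention_mask
    if images is not None:
        encoding["pixel_values"] = image_processor(images)
    if visual_prompt is not None:
        encoding["conditional_pixel_values"] = image_processor(visual_prompt)
    return encoding


# Toy callables standing in for a real tokenizer / image processor:
fake_tokenizer = lambda text: {"input_ids": [[101, 102]], "attention_mask": [[1, 1]]}
fake_image_processor = lambda images: [[0.0]]
print(toy_process(text="a cat", images=["img"], tokenizer=fake_tokenizer, image_processor=fake_image_processor))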
'''simple docstring'''
def solution( __lowercase = 1_0 ) -> str:
    if not isinstance(__lowercase , int ) or __lowercase < 0:
        raise ValueError('''Invalid input''' )
    modulus = 1_0**__lowercase
    number = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
    return str(number % modulus )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'{solution(10) = }')
334
'''simple docstring'''
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
334
1
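The `has_loop` check in the record above stores every visited node in a list, costing O(n) memory and O(n^2) comparisons. Floyd's tortoise-and-hare finds the same answer in O(1) extra space; a sketch against the same `next_node` linking convention (the `_N` helper class is just a stand-in for the record's `Node`):

def has_loop_floyd(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one step
        fast = fast.next_node.next_node  # advances two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False


class _N:  # tiny stand-in for the record's Node class
    def __init__(self, data):
        self.data = data
        self.next_node = None


a, b = _N(1), _N(2)
a.next_node = b
print(has_loop_floyd(a))  # False
b.next_node = a
print(has_loop_floyd(a))  # True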
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''' , [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ] , ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: A: List[Any] = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) A: str = DatasetInfosDict.from_directory(__lowercase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 4_2 @pytest.mark.parametrize( '''dataset_info''' , [ DatasetInfo(), DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , ), ] , ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: A: Optional[Any] = str(__lowercase ) dataset_info.write_to_directory(__lowercase ) A: List[str] = DatasetInfo.from_directory(__lowercase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(__lowercase , '''dataset_info.json''' ) ) def SCREAMING_SNAKE_CASE( ) -> Tuple: A: str = DatasetInfo( description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , ) A: Union[str, Any] = dataset_info._to_yaml_dict() assert sorted(__lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) A: Union[str, Any] = yaml.safe_dump(__lowercase ) A: Tuple = yaml.safe_load(__lowercase ) assert dataset_info_yaml_dict == reloaded def SCREAMING_SNAKE_CASE( ) -> Optional[int]: A: List[str] = DatasetInfo() A: Dict = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''' , [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=4_2 , ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=4_2 ), '''v2''': DatasetInfo(dataset_size=1_3_3_7 ), } ), ] , ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: A: Any = str(__lowercase ) 
dataset_infos_dict.write_to_directory(__lowercase ) A: Optional[Any] = DatasetInfosDict.from_directory(__lowercase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): A: List[str] = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml A: List[str] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(__lowercase , '''README.md''' ) )
334
'''simple docstring'''
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 90 counterclockwise:\n''')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 180:\n''')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('''\norigin:\n''')
    print_matrix(matrix)
    print('''\nrotate 270 counterclockwise:\n''')
    print_matrix(rotate_270(matrix))
334
1
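All of the rotations in the record above are compositions of two primitives, transpose and row/column reversal. A quick standalone check of the 90-degree counterclockwise identity on a 2x2 matrix:

def transpose(matrix):
    return [list(column) for column in zip(*matrix)]


def reverse_row(matrix):
    return matrix[::-1]


# Rotating 90 degrees counterclockwise maps entry (row, col) to (n - 1 - col, row).
m = [[1, 2], [3, 4]]
print(reverse_row(transpose(m)))  # [[2, 4], [1, 3]]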
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCamelCase = {
    '''configuration_trajectory_transformer''': [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TrajectoryTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase['''modeling_trajectory_transformer'''] = [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrajectoryTransformerModel''',
        '''TrajectoryTransformerPreTrainedModel''',
        '''load_tf_weights_in_trajectory_transformer''',
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase, module_spec=__spec__)
334
'''simple docstring'''
from __future__ import annotations

import numpy as np


def relu( __lowercase ) -> np.ndarray:
    return np.maximum(0 , __lowercase )


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
334
1
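The lazy-import `__init__` modules in this section all share one idea: a dict maps submodule names to exported symbols, and a proxy object defers the actual import until an attribute is first touched. A minimal self-contained illustration of that idea, using the stdlib `json` module as the demo target (`LazyNamespace` is a toy stand-in, not the transformers `_LazyModule`):

import importlib


class LazyNamespace:
    def __init__(self, import_structure):
        # Map each exported name to the module that defines it.
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        # Import the owning module only when one of its names is first accessed.
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)


ns = LazyNamespace({"json": ["dumps", "loads"]})
print(ns.dumps({"lazy": True}))  # json is imported only at this call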
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 UpperCamelCase = get_tests_dir('''fixtures''') UpperCamelCase = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') UpperCamelCase = get_tests_dir('''fixtures/dummy-config.json''') class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Optional[Any] = 0 def _snake_case ( self : str ) -> str: '''simple docstring''' A: List[Any] = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : str ) -> Any: '''simple docstring''' A: str = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A: str = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally A: Any = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ).to_dict() config_dict.pop('''feature_extractor_type''' ) A: Dict = WavaVecaFeatureExtractor(**SCREAMING_SNAKE_CASE_ ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) config.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Tuple = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) # make sure private variable is not incorrectly saved A: Optional[int] = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' A: Tuple = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , '''bert-base is not a local folder and is not a valid model identifier''' ): A: Any = AutoFeatureExtractor.from_pretrained('''bert-base''' ) def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A: Optional[Any] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ , revision='''aaaaaa''' ) def _snake_case ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with self.assertRaisesRegex( SCREAMING_SNAKE_CASE_ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): A: Any = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' with self.assertRaises(SCREAMING_SNAKE_CASE_ ): A: Union[str, Any] = AutoFeatureExtractor.from_pretrained( 
'''hf-internal-testing/test_dynamic_feature_extractor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE_ ): A: Tuple = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) A: Any = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: List[str] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) def _snake_case ( self : int ) -> Dict: '''simple docstring''' try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE_ ): AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now that the config is registered, it can be used as any other config with the auto-API A: List[Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Any = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def _snake_case ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : int = True try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # If remote code is not set, the default is to use local A: List[Any] = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. A: Optional[Any] = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub A: Optional[int] = AutoFeatureExtractor.from_pretrained( '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) self.assertTrue(not hasattr(SCREAMING_SNAKE_CASE_ , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
334
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


UpperCamelCase = {
    '''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
    '''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase['''modeling_tf_speech_to_text'''] = [
        '''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFSpeech2TextForConditionalGeneration''',
        '''TFSpeech2TextModel''',
        '''TFSpeech2TextPreTrainedModel''',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase['''modeling_speech_to_text'''] = [
        '''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Speech2TextForConditionalGeneration''',
        '''Speech2TextModel''',
        '''Speech2TextPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase, module_spec=__spec__)
334
1
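The test above exercises the Auto-class registration hooks: a new config type is registered under a `model_type` string, then a feature extractor class is attached to that config. A sketch of the same flow, assuming `transformers` is installed (the `NewConfig`/`NewFeatureExtractor` classes are user-defined stand-ins):

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class NewConfig(PretrainedConfig):
    model_type = "new-model"  # hypothetical model type


class NewFeatureExtractor(FeatureExtractionMixin):
    pass


AutoConfig.register("new-model", NewConfig)
AutoFeatureExtractor.register(NewConfig, NewFeatureExtractor)
# From here on, a checkpoint whose config declares `"model_type": "new-model"`
# resolves to NewFeatureExtractor through AutoFeatureExtractor.from_pretrained(...).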
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


UpperCamelCase = {'''tokenization_herbert''': ['''HerbertTokenizer''']}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], UpperCamelCase, module_spec=__spec__)
334
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
334
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Tuple = torch.load(__lowercase , map_location='''cpu''' ) return sd def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Union[str, Any]: A: List[Any] = OrderedDict() A: Union[str, Any] = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A: int = key for name_pair in rename_keys_prefix: A: Union[str, Any] = new_key.replace(name_pair[0] , name_pair[1] ) A: int = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A: Optional[int] = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A: Optional[Any] = '''pretraining''' if "vcr" in checkpoint_path: A: Optional[Any] = {'''visual_embedding_dim''': 5_1_2} elif "vqa_advanced" in checkpoint_path: A: Any = {'''visual_embedding_dim''': 2_0_4_8} elif "vqa" in checkpoint_path: A: str = {'''visual_embedding_dim''': 2_0_4_8} elif "nlvr" in checkpoint_path: A: int = {'''visual_embedding_dim''': 1_0_2_4} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A: Tuple = {'''visual_embedding_dim''': 5_1_2} A: str = '''multichoice''' elif "vqa_advanced" in checkpoint_path: A: str = {'''visual_embedding_dim''': 2_0_4_8} A: Any = '''vqa_advanced''' elif "vqa" in checkpoint_path: A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9} A: str = '''vqa''' elif "nlvr" in checkpoint_path: A: Dict = { '''visual_embedding_dim''': 1_0_2_4, '''num_labels''': 2, } A: Any = '''nlvr''' A: List[str] = VisualBertConfig(**__lowercase ) # Load State Dict A: List[Any] = load_state_dict(__lowercase ) A: Tuple = get_new_dict(__lowercase , __lowercase ) if model_type == "pretraining": A: List[Any] = VisualBertForPreTraining(__lowercase ) elif model_type == "vqa": A: Tuple = VisualBertForQuestionAnswering(__lowercase ) elif model_type == "nlvr": A: Any = VisualBertForVisualReasoning(__lowercase ) elif model_type == "multichoice": A: Dict = VisualBertForMultipleChoice(__lowercase ) model.load_state_dict(__lowercase ) # Save Checkpoints Path(__lowercase 
).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
334
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' @register_to_config def __init__( self : str , SCREAMING_SNAKE_CASE_ : int = 6_55_36 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 2 , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : str = "fourier" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : float = 0.0 , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , SCREAMING_SNAKE_CASE_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , SCREAMING_SNAKE_CASE_ : Tuple[str] = "UNetMidBlock1D" , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : Tuple[int] = (32, 32, 64) , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : int = 8 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : bool = False , ) -> Tuple: '''simple docstring''' super().__init__() A: Optional[Any] = sample_size # time if time_embedding_type == "fourier": A: Tuple = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=SCREAMING_SNAKE_CASE_ , log=SCREAMING_SNAKE_CASE_ , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ ) A: List[str] = 2 * block_out_channels[0] elif time_embedding_type == "positional": A: str = Timesteps( block_out_channels[0] , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , downscale_freq_shift=SCREAMING_SNAKE_CASE_ ) A: Any = block_out_channels[0] if use_timestep_embedding: A: Optional[Any] = block_out_channels[0] * 4 A: List[Any] = TimestepEmbedding( in_channels=SCREAMING_SNAKE_CASE_ , time_embed_dim=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , out_dim=block_out_channels[0] , ) A: Optional[Any] = nn.ModuleList([] ) A: str = None A: str = nn.ModuleList([] ) A: Tuple = None # down A: Any = in_channels for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = output_channel A: List[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels A: List[Any] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[int] = get_down_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(SCREAMING_SNAKE_CASE_ ) # mid A: Union[str, Any] = get_mid_block( SCREAMING_SNAKE_CASE_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=SCREAMING_SNAKE_CASE_ , add_downsample=SCREAMING_SNAKE_CASE_ , ) # up A: Optional[Any] = list(reversed(SCREAMING_SNAKE_CASE_ ) ) A: List[str] = reversed_block_out_channels[0] if out_block_type is None: A: int = out_channels else: A: Union[str, Any] = block_out_channels[0] for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE_ ): A: List[Any] = output_channel A: int = ( 
reversed_block_out_channels[i + 1] if i < len(SCREAMING_SNAKE_CASE_ ) - 1 else final_upsample_channels ) A: Optional[int] = i == len(SCREAMING_SNAKE_CASE_ ) - 1 A: Optional[Any] = get_up_block( SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(SCREAMING_SNAKE_CASE_ ) A: Any = output_channel # out A: List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) A: Optional[int] = get_out_block( out_block_type=SCREAMING_SNAKE_CASE_ , num_groups_out=SCREAMING_SNAKE_CASE_ , embed_dim=block_out_channels[0] , out_channels=SCREAMING_SNAKE_CASE_ , act_fn=SCREAMING_SNAKE_CASE_ , fc_dim=block_out_channels[-1] // 4 , ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Union[torch.Tensor, float, int] , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[UNetaDOutput, Tuple]: '''simple docstring''' A: Any = timestep if not torch.is_tensor(SCREAMING_SNAKE_CASE_ ): A: Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(SCREAMING_SNAKE_CASE_ ) and len(timesteps.shape ) == 0: A: List[str] = timesteps[None].to(sample.device ) A: int = self.time_proj(SCREAMING_SNAKE_CASE_ ) if self.config.use_timestep_embedding: A: List[Any] = self.time_mlp(SCREAMING_SNAKE_CASE_ ) else: A: str = timestep_embed[..., None] A: Union[str, Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) A: Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down A: List[str] = () for downsample_block in self.down_blocks: A , A: Optional[int] = downsample_block(hidden_states=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: A: Dict = self.mid_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): A: List[Any] = down_block_res_samples[-1:] A: List[str] = down_block_res_samples[:-1] A: Optional[int] = upsample_block(SCREAMING_SNAKE_CASE_ , res_hidden_states_tuple=SCREAMING_SNAKE_CASE_ , temb=SCREAMING_SNAKE_CASE_ ) # 5. post-process if self.out_block: A: Any = self.out_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not return_dict: return (sample,) return UNetaDOutput(sample=SCREAMING_SNAKE_CASE_ )
334
1
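The conversion script above is mostly mechanical key surgery: walk the old state dict, rewrite each key through a list of (old, new) prefix pairs, and copy values across. The same idea in miniature, with a plain dict standing in for a torch state dict and an illustrative subset of rename pairs:

from collections import OrderedDict

RENAME_PAIRS = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]  # illustrative subset


def rename_state_dict(state_dict, rename_pairs=RENAME_PAIRS):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in rename_pairs:
            new_key = new_key.replace(old, new)
        new_state_dict[new_key] = value
    return new_state_dict


old = OrderedDict({"bert.bert.encoder.weight": 1, "bert.cls.bias": 2})
print(rename_state_dict(old))
# OrderedDict([('visual_bert.encoder.weight', 1), ('cls.bias', 2)])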
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''') UpperCamelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def SCREAMING_SNAKE_CASE( __lowercase ) -> Any: with open(__lowercase , '''rb''' ) as f: A: str = Image.open(__lowercase ) return im.convert('''RGB''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).""" } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the training data."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the validation data."""} ) UpperCamelCase_ : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def _snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( '''You must specify either a dataset name from the hub or a train and/or validation directory.''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : str = field( default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = 
field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) UpperCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase_ : str = field(default=UpperCAmelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: Optional[int] = torch.stack([example['''pixel_values'''] for example in examples] ) A: str = torch.tensor([example['''labels'''] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def SCREAMING_SNAKE_CASE( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A: List[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_image_classification''' , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A: Tuple = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. A: Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A: int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. 
""" '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: A: str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , ) else: A: Optional[int] = {} if data_args.train_dir is not None: A: Optional[int] = os.path.join(data_args.train_dir , '''**''' ) if data_args.validation_dir is not None: A: Optional[int] = os.path.join(data_args.validation_dir , '''**''' ) A: Optional[int] = load_dataset( '''imagefolder''' , data_files=__lowercase , cache_dir=model_args.cache_dir , task='''image-classification''' , ) # If we don't have a validation split, split off a percentage of train as validation. A: Any = None if '''validation''' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: A: Optional[int] = dataset['''train'''].train_test_split(data_args.train_val_split ) A: int = split['''train'''] A: Optional[int] = split['''test'''] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. A: int = dataset['''train'''].features['''labels'''].names A , A: List[Any] = {}, {} for i, label in enumerate(__lowercase ): A: Tuple = str(__lowercase ) A: Tuple = label # Load the accuracy metric from the datasets package A: List[Any] = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowercase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) A: List[str] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A: Optional[int] = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) A: Optional[Any] = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: A: List[Any] = image_processor.size['''shortest_edge'''] else: A: List[str] = (image_processor.size['''height'''], image_processor.size['''width''']) A: Any = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) A: Tuple = Compose( [ RandomResizedCrop(__lowercase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) A: str = Compose( [ Resize(__lowercase ), CenterCrop(__lowercase ), ToTensor(), normalize, ] ) def train_transforms(__lowercase ): A: Optional[int] = [ _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image'''] ] return example_batch def val_transforms(__lowercase ): A: Union[str, Any] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: A: Optional[int] = ( dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: A: Tuple = ( dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__lowercase ) # Initalize our trainer A: List[str] = Trainer( model=__lowercase , args=__lowercase , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: A: Union[str, Any] = None if training_args.resume_from_checkpoint is not None: A: Optional[int] = training_args.resume_from_checkpoint elif last_checkpoint is not None: A: str = last_checkpoint A: Tuple = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A: Optional[int] = trainer.evaluate() trainer.log_metrics('''eval''' , __lowercase ) trainer.save_metrics('''eval''' , __lowercase ) # Write model card and (optionally) push to hub A: Tuple = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''image-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''image-classification''', '''vision'''], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
334
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


UpperCamelCase = logging.get_logger(__name__)


class lowerCAmelCase_ ( SegformerImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use SegformerImageProcessor instead.''' ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs )
334
1
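The `collate_fn` in the training script above just stacks per-example pixel tensors and gathers labels into one batch dict for the Trainer. A tiny demonstration of what it produces (requires torch; the shapes are illustrative):

import torch


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


batch = collate_fn(
    [
        {"pixel_values": torch.zeros(3, 224, 224), "labels": 0},
        {"pixel_values": torch.ones(3, 224, 224), "labels": 1},
    ]
)
print(batch["pixel_values"].shape, batch["labels"])  # torch.Size([2, 3, 224, 224]) tensor([0, 1])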
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {
    '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class lowerCAmelCase_ ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """vit_mae"""

    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=2_24,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=5_12,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=20_48,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
334
'''simple docstring''' from collections import deque class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None: '''simple docstring''' A: Union[str, Any] = process_name # process name A: List[str] = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A: Dict = arrival_time A: Optional[Any] = burst_time # remaining burst time A: Any = 0 # total time of the process wait in ready queue A: Any = 0 # time from arrival time to completion time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ) -> None: '''simple docstring''' A: Dict = number_of_queues # time slice of queues that round robin algorithm applied A: int = time_slices # unfinished process is in this ready_queue A: Tuple = queue # current time A: int = current_time # finished process is in this sequence queue A: deque[Process] = deque() def _snake_case ( self : List[Any] ) -> list[str]: '''simple docstring''' A: str = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Optional[int] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: Any = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[Process] ) -> list[int]: '''simple docstring''' A: List[Any] = [] for i in range(len(SCREAMING_SNAKE_CASE_ ) ): completion_times.append(queue[i].stop_time ) return completion_times def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> list[int]: '''simple docstring''' return [q.burst_time for q in queue] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Process ) -> int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] ) -> deque[Process]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE_ ) != 0: A: Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A: Any = 0 # set the process's turnaround time because it is finished A: int = self.current_time - cp.arrival_time # set the completion time A: List[str] = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : 
int ) -> tuple[deque[Process], deque[Process]]: '''simple docstring''' A: deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE_ ) ): A: Dict = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(SCREAMING_SNAKE_CASE_ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A: Optional[Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE_ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A: int = 0 # set the finish time A: Union[str, Any] = self.current_time # update the process' turnaround time because it is finished A: Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE_ ) self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _snake_case ( self : Optional[Any] ) -> deque[Process]: '''simple docstring''' for i in range(self.number_of_queues - 1 ): A , A: Optional[Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) UpperCamelCase = Process('''P1''', 0, 53) UpperCamelCase = Process('''P2''', 0, 17) UpperCamelCase = Process('''P3''', 0, 68) UpperCamelCase = Process('''P4''', 0, 24) UpperCamelCase = 3 UpperCamelCase = [17, 25] UpperCamelCase = deque([Pa, Pa, Pa, Pa]) UpperCamelCase = MLFQ(number_of_queues, time_slices, queue, 0) UpperCamelCase = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( f'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( f'sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
334
1
'''simple docstring'''

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=False , __lowercase=False ) -> str:
    A: List[Any] = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append(
            (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append(
            (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append(
            (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
            (
                '''text_embeddings.position_embeddings.weight''',
                '''vilt.embeddings.text_embeddings.position_embeddings.weight''',
            ),
            ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
            (
                '''text_embeddings.token_type_embeddings.weight''',
                '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
            ),
            ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
            ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
            # patch embeddings
            ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
            ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
            ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
            ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
            # token type embeddings
            ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
            ('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
            ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
            ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
                ('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
                ('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
                ('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
                ('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
                ('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
                ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
                ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
                ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
                ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
                ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
            ]
        )
    else:
        pass

    return rename_keys


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any:
    for i in range(config.num_hidden_layers ):
        A: Dict = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        A: Any = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        A: Optional[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        A: Optional[Any] = in_proj_weight[
            : config.hidden_size, :
        ]
        A: List[str] = in_proj_bias[: config.hidden_size]
        A: Tuple = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        A: Optional[Any] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        A: Optional[int] = in_proj_weight[
            -config.hidden_size :, :
        ]
        A: str = in_proj_bias[-config.hidden_size :]


def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
    A: Optional[Any] = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(__lowercase , __lowercase )


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
    A: List[str] = dct.pop(__lowercase )
    A: Any = val


@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any:
    A: Optional[int] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__lowercase )
    A: Dict = False
    A: Tuple = False
    A: Optional[int] = False
    A: List[str] = False
    if "vqa" in checkpoint_url:
        A: Union[str, Any] = True
        A: List[str] = 3_1_2_9
        A: Optional[Any] = '''huggingface/label-files'''
        A: List[str] = '''vqa2-id2label.json'''
        A: List[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
        A: str = {int(__lowercase ): v for k, v in idalabel.items()}
        A: List[Any] = idalabel
        A: Optional[int] = {v: k for k, v in idalabel.items()}
        A: List[Any] = ViltForQuestionAnswering(__lowercase )
    elif "nlvr" in checkpoint_url:
        A: List[str] = True
        A: Any = 2
        A: Union[str, Any] = {0: '''False''', 1: '''True'''}
        A: Any = {v: k for k, v in config.idalabel.items()}
        A: Union[str, Any] = 3
        A: Any = ViltForImagesAndTextClassification(__lowercase )
    elif "irtr" in checkpoint_url:
        A: int = True
        A: List[str] = ViltForImageAndTextRetrieval(__lowercase )
    elif "mlm_itm" in checkpoint_url:
        A: Optional[Any] = True
        A: str = ViltForMaskedLM(__lowercase )
    else:
        raise ValueError('''Unknown model type''' )

    # load state_dict of original model, remove and rename some keys
    A: Tuple = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict''']
    A: Union[str, Any] = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase )
    for src, dest in rename_keys:
        rename_key(__lowercase , __lowercase , __lowercase )
    read_in_q_k_v(__lowercase , __lowercase )
    if mlm_model or irtr_model:
        A: Tuple = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(__lowercase , __lowercase )

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        A , A: Optional[Any] = model.load_state_dict(__lowercase , strict=__lowercase )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(__lowercase )

    # Define processor
    A: int = ViltImageProcessor(size=3_8_4 )
    A: int = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    A: Any = ViltProcessor(__lowercase , __lowercase )

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        A: Optional[int] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
        A: Union[str, Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw )
        A: str = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        A: int = processor(__lowercase , __lowercase , return_tensors='''pt''' )
        A: Dict = processor(__lowercase , __lowercase , return_tensors='''pt''' )
        A: int = model(
            input_ids=encoding_a.input_ids ,
            pixel_values=encoding_a.pixel_values ,
            pixel_values_a=encoding_a.pixel_values ,
        )
    else:
        A: int = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowercase ).raw )
        if mlm_model:
            A: Tuple = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            A: Union[str, Any] = '''How many cats are there?'''
        A: Any = processor(__lowercase , __lowercase , return_tensors='''pt''' )
        A: Optional[int] = model(**__lowercase )

    # Verify outputs
    if mlm_model:
        A: Union[str, Any] = torch.Size([1, 1_1, 3_0_5_2_2] )
        A: Any = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 )

        # verify masked token prediction equals "cats"
        A: Dict = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        A: Any = torch.Size([1, 3_1_2_9] )
        A: Any = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 )

        # verify vqa prediction equals "2"
        A: Optional[Any] = outputs.logits.argmax(-1 ).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        A: int = torch.Size([1, 2] )
        A: Tuple = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
        assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 )
        assert outputs.logits.shape == expected_shape

    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__lowercase )
    processor.save_pretrained(__lowercase )


if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
        type=str,
        help='''URL of the checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )

    UpperCamelCase = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
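The conversion script above is built around one reusable pattern: pop each tensor out of the original checkpoint's state dict and reinsert it under the Hugging Face key before calling `load_state_dict`. A minimal self-contained sketch of that pattern; the two keys are taken from the mapping above, and `rename_state_dict_keys` is a hypothetical helper, not part of the script:

import torch

def rename_state_dict_keys(state_dict, rename_keys):
    # Move each tensor from its old key to its new key, in place,
    # mirroring the pop/reassign pattern used by rename_key above.
    for old_key, new_key in rename_keys:
        state_dict[new_key] = state_dict.pop(old_key)

# illustrative subset of the mapping built by create_rename_keys above
rename_keys = [
    ("transformer.norm.weight", "vilt.layernorm.weight"),
    ("transformer.norm.bias", "vilt.layernorm.bias"),
]
state_dict = {
    "transformer.norm.weight": torch.ones(768),
    "transformer.norm.bias": torch.zeros(768),
}
rename_state_dict_keys(state_dict, rename_keys)
assert sorted(state_dict) == ["vilt.layernorm.bias", "vilt.layernorm.weight"]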
334
'''simple docstring'''

import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
UpperCamelCase = logging.get_logger()


@dataclass
class lowerCAmelCase_ :
    '''simple docstring'''

    UpperCamelCase_ : nn.Module
    UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ )
    UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ )

    def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> int:
        '''simple docstring'''
        A: List[str] = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(SCREAMING_SNAKE_CASE_ )

    def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Dict:
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(SCREAMING_SNAKE_CASE_ )
        [x.remove() for x in self.handles]
        return self

    @property
    def _snake_case ( self : Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )


@dataclass
class lowerCAmelCase_ :
    '''simple docstring'''

    UpperCamelCase_ : nn.Module
    UpperCamelCase_ : nn.Module
    UpperCamelCase_ : int = 0
    UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ )
    UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ )

    def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]:
        '''simple docstring'''
        A: Dict = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized
        A: Tuple = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized
        A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) )
        A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) )

        if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while"""
                f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}."""
            )

        for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""" )


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase = True ) -> Any:
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        A: Union[str, Any] = timm.create_model(__lowercase , pretrained=__lowercase ).eval()
        A: List[str] = ResNetForImageClassification(__lowercase ).eval()
        A: int = ModuleTransfer(src=__lowercase , dest=__lowercase )
        A: List[str] = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(__lowercase )

    assert torch.allclose(from_model(__lowercase ) , our_model(__lowercase ).logits ), "The model logits don't match the original one."

    A: str = F"""resnet{'-'.join(name.split('resnet' ) )}"""
    print(__lowercase )

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,
            commit_message='''Add model''' ,
            use_temp_dir=__lowercase ,
        )

        # we can use the convnext one
        A: Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,
            commit_message='''Add image processor''' ,
            use_temp_dir=__lowercase ,
        )

        print(F"""Pushed {checkpoint_name}""" )


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> List[Any]:
    A: Union[str, Any] = '''imagenet-1k-id2label.json'''
    A: Union[str, Any] = 1_0_0_0
    A: Optional[int] = (1, num_labels)
    A: Dict = '''huggingface/label-files'''
    A: Any = num_labels
    A: Union[str, Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) )
    A: Optional[int] = {int(__lowercase ): v for k, v in idalabel.items()}
    A: Optional[int] = idalabel
    A: List[str] = {v: k for k, v in idalabel.items()}
    A: str = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase )
    A: Optional[Any] = {
        '''resnet18''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
        '''resnet26''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
        '''resnet34''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='''basic''' ),
        '''resnet50''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
        '''resnet101''': ImageNetPreTrainedConfig(
            depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
        '''resnet152''': ImageNetPreTrainedConfig(
            depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='''bottleneck''' ),
    }

    if model_name:
        convert_weight_and_push(__lowercase , names_to_config[model_name] , __lowercase , __lowercase )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(__lowercase , __lowercase , __lowercase , __lowercase )
    return config, expected_shape


if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )

    UpperCamelCase = parser.parse_args()
    UpperCamelCase = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
334
1