Dataset schema (column, dtype, observed range):

    code                     string   lengths 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   lengths 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
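For orientation, a minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library; the repository path below is a hypothetical placeholder, since the dump does not name its source.

```python
from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical placeholder, not the real source of this dump.
ds = load_dataset("user/code-style-pairs", split="train")

print(ds.features)  # column names and dtypes, matching the schema above
row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["label"])
```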
code:
```python
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    # (Function names restored here; the dump stores them obfuscated.)
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
codestyle: 6
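A quick round trip through the two functions above, using the names as restored here:

```python
encoded = base16_encode(b"Hello")
print(encoded)                            # 48656C6C6F
assert base16_decode(encoded) == b"Hello"
```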
code:
```python
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how often each total occurs over all rolls of `dice_number` dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    dice_range = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(dice_range, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
```
codestyle: 6
label: 1
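If this row is Project Euler problem 205 (the nine four-sided vs. six six-sided dice setup matches), `solution()` should reproduce the published answer, 0.5731441; treat that correspondence as an assumption rather than something the dump states.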
code:
```python
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
```
codestyle: 6
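Two quick calls against the restored function; real roots come back as floats, complex ones stay complex:

```python
print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0): x^2 - 3x + 2 factors as (x - 1)(x - 2)
print(quadratic_roots(a=1, b=0, c=1))   # 1j and -1j: x^2 + 1 has no real roots
```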
code:
```python
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

# Alias names restored by inference; the dump stores them obfuscated.
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
```
codestyle: 6
label: 1
'''simple docstring''' import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =dataset lowerCamelCase_ =process lowerCamelCase_ =params def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.dataset[i] lowerCamelCase_ =self.process(lowerCAmelCase, **self.params ) return processed class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =loader lowerCamelCase_ =infer lowerCamelCase_ =params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCamelCase_ =None lowerCamelCase_ =loader_batch_size # Internal bookkeeping lowerCamelCase_ =None lowerCamelCase_ =None def __len__( self ): """simple docstring""" return len(self.loader ) def __iter__( self ): """simple docstring""" lowerCamelCase_ =iter(self.loader ) return self def lowercase__ ( self ): """simple docstring""" if isinstance(self._loader_batch_data, torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCamelCase_ =self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCamelCase_ ={} for k, element in self._loader_batch_data.items(): if isinstance(lowerCAmelCase, lowerCAmelCase ): # Convert ModelOutput to tuple first lowerCamelCase_ =element.to_tuple() if isinstance(element[0], torch.Tensor ): lowerCamelCase_ =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0], np.ndarray ): lowerCamelCase_ =tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCAmelCase, lowerCAmelCase ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0], torch.Tensor ): lowerCamelCase_ =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0], np.ndarray ): lowerCamelCase_ =tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCamelCase_ =None elif isinstance(element[self._loader_batch_index], torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase_ =element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index], np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCamelCase_ =np.expand_dims(element[self._loader_batch_index], 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowerCamelCase_ =element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCamelCase_ =self._loader_batch_data.__class__(lowerCAmelCase ) self._loader_batch_index += 1 return result def lowercase__ ( self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCamelCase_ =next(self.iterator ) lowerCamelCase_ =self.infer(lowerCAmelCase, **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowerCAmelCase, torch.Tensor ): lowerCamelCase_ =processed else: lowerCamelCase_ =list(processed.keys() )[0] lowerCamelCase_ =processed[key] if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =len(lowerCAmelCase ) else: lowerCamelCase_ =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCamelCase_ =observed_batch_size # Setting internal index to unwrap the batch lowerCamelCase_ =processed lowerCamelCase_ =0 return self.loader_batch_item() else: # We're not unrolling batches return processed class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" super().__init__(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def __iter__( self ): """simple docstring""" lowerCamelCase_ =iter(self.loader ) lowerCamelCase_ =None return self def lowercase__ ( self ): """simple docstring""" if self.subiterator is None: lowerCamelCase_ =self.infer(next(self.iterator ), **self.params ) try: # Try to return next item lowerCamelCase_ =next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCamelCase_ =self.infer(next(self.iterator ), **self.params ) lowerCamelCase_ =next(self.subiterator ) return processed class __UpperCamelCase ( lowerCamelCase__ ): def __iter__( self ): """simple docstring""" lowerCamelCase_ =iter(self.loader ) return self def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =False lowerCamelCase_ =[] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCamelCase_ =self.loader_batch_item() lowerCamelCase_ =item.pop('''is_last''' ) accumulator.append(lowerCAmelCase ) if is_last: return accumulator while not is_last: lowerCamelCase_ =self.infer(next(self.iterator ), **self.params ) if self.loader_batch_size is not None: if isinstance(lowerCAmelCase, torch.Tensor ): lowerCamelCase_ =processed else: lowerCamelCase_ =list(processed.keys() )[0] lowerCamelCase_ =processed[key] if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =len(lowerCAmelCase ) else: lowerCamelCase_ =first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
lowerCamelCase_ =observed_batch_size lowerCamelCase_ =processed lowerCamelCase_ =0 while self._loader_batch_index < self.loader_batch_size: lowerCamelCase_ =self.loader_batch_item() lowerCamelCase_ =item.pop('''is_last''' ) accumulator.append(lowerCAmelCase ) if is_last: return accumulator else: lowerCamelCase_ =processed lowerCamelCase_ =item.pop('''is_last''' ) accumulator.append(lowerCAmelCase ) return accumulator class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =dataset lowerCamelCase_ =key def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" return self.dataset[i][self.key] class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =dataset lowerCamelCase_ =keya lowerCamelCase_ =keya def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
codestyle: 6
code:
```python
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing optimization algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (change / current_temp)  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
```
codestyle: 6
label: 1
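The rejection branch in the sample above is the standard Metropolis acceptance rule: a move that worsens the score (change < 0) is still taken with probability e^(change / current_temp), so escapes from local optima become rarer as the temperature decays. A minimal illustration of that decay:

```python
import math

# Probability of accepting a move that worsens the score by 5,
# at successively colder temperatures.
for temp in (100, 10, 1):
    print(temp, math.e ** (-5 / temp))  # ~0.951, ~0.607, ~0.007
```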
code:
```python
def count_inversions_bf(arr: list) -> int:
    """Brute-force O(n^2) count of inversions (pairs i < j with arr[i] > arr[j])."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr: list):
    """Divide-and-conquer O(n log n) count; returns (sorted_arr, num_inversions)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversion_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p: list, q: list):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
```
codestyle: 6
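A small sanity check of the two counters above:

```python
print(count_inversions_bf([3, 1, 2]))         # 2: the pairs (3, 1) and (3, 2)
print(count_inversions_recursive([3, 1, 2]))  # ([1, 2, 3], 2): sorted copy plus the count
```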
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( __snake_case : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def a_ ( __snake_case : List[Any] ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict: """simple docstring""" lowerCamelCase_ ={} for old_key in state_dict.keys(): lowerCamelCase_ =old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' ) else: lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCamelCase_ =state_dict[old_key] return new_dict def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =0 os.makedirs(__snake_case , exist_ok=__snake_case ) for expert in range(__snake_case ): lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(__snake_case ): lowerCamelCase_ =torch.load(__snake_case )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =os.path.join( __snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) torch.save(__snake_case , __snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__snake_case )[0]].dtype ) # Add the last block lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__snake_case ) == 1: 
lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) torch.save(__snake_case , __snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__snake_case , __snake_case ) # Otherwise, let's build the index lowerCamelCase_ ={} for idx, shard in enumerate(__snake_case ): lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' ) lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) for key in shard: lowerCamelCase_ =shard_file # Add the metadata lowerCamelCase_ ={'''total_size''': total_size} lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) return metadata, index if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a_ : Tuple = parser.parse_args() a_ , a_ : int = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_28, args.dtype, ) a_ : Tuple = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
codestyle: 6
label: 1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a_ : int = None a_ : List[Any] = logging.get_logger(__name__) a_ : str = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} a_ : Optional[Any] = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } a_ : Union[str, Any] = { """facebook/mbart-large-en-ro""": 10_24, """facebook/mbart-large-cc25""": 10_24, } # fmt: off a_ : Optional[int] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Tuple =VOCAB_FILES_NAMES lowercase : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : str =PRETRAINED_VOCAB_FILES_MAP lowercase : int =['input_ids', 'attention_mask'] lowercase : Union[str, Any] =MBartTokenizer lowercase : List[int] =[] lowercase : List[int] =[] def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="</s>", lowerCAmelCase="<s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<mask>", lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =AddedToken(lowerCAmelCase, lstrip=lowerCAmelCase, rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) else mask_token super().__init__( vocab_file=lowerCAmelCase, tokenizer_file=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, sep_token=lowerCAmelCase, cls_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, mask_token=lowerCAmelCase, src_lang=lowerCAmelCase, tgt_lang=lowerCAmelCase, additional_special_tokens=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =vocab_file lowerCamelCase_ =False if not self.vocab_file else True lowerCamelCase_ =FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) lowerCamelCase_ ={ lang_code: self.convert_tokens_to_ids(lowerCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowerCamelCase_ =src_lang if src_lang is not None else '''en_XX''' lowerCamelCase_ =self.convert_tokens_to_ids(self._src_lang ) lowerCamelCase_ =tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowercase__ ( self ): """simple docstring""" return self._src_lang @src_lang.setter def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) lowerCamelCase_ =src_lang lowerCamelCase_ =self(lowerCAmelCase, add_special_tokens=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =self.convert_tokens_to_ids(lowerCAmelCase ) lowerCamelCase_ =tgt_lang_id return inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = "en_XX", lowerCAmelCase = None, lowerCAmelCase = "ro_RO", **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =src_lang lowerCamelCase_ =tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def lowercase__ ( self ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.convert_tokens_to_ids(lowerCAmelCase ) lowerCamelCase_ =[] lowerCamelCase_ =[self.eos_token_id, self.cur_lang_code] lowerCamelCase_ =self.convert_ids_to_tokens(self.prefix_tokens ) lowerCamelCase_ =self.convert_ids_to_tokens(self.suffix_tokens ) lowerCamelCase_ =processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.convert_tokens_to_ids(lowerCAmelCase ) lowerCamelCase_ =[] lowerCamelCase_ =[self.eos_token_id, self.cur_lang_code] lowerCamelCase_ =self.convert_ids_to_tokens(self.prefix_tokens ) lowerCamelCase_ =self.convert_ids_to_tokens(self.suffix_tokens ) lowerCamelCase_ =processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, 
special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens ) ), ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ): copyfile(self.vocab_file, lowerCAmelCase ) return (out_vocab_file,)
codestyle: 6
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int =['image_processor', 'tokenizer'] lowercase : int ='LayoutLMv2ImageProcessor' lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ =features['''words'''] lowerCamelCase_ =self.tokenizer( text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, ) # add pixel values lowerCamelCase_ =features.pop('''pixel_values''' ) if return_overflowing_tokens is True: lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] ) 
lowerCamelCase_ =images return encoded_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' ) return images_with_overflow def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
codestyle: 6
label: 1
code:
```python
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


# Class and attribute names restored by inference; the dump stores them obfuscated.
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
```
codestyle: 6
code:
```python
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


# Class and test-method names restored by inference; the dump stores them obfuscated.
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
```
codestyle: 6
label: 1
code:
```python
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
```
codestyle: 6
'''simple docstring''' import datasets from .evaluate import evaluate a_ : List[Any] = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ a_ : List[Any] = """ This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ a_ : Any = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': { '''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ), }, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={prediction['''id''']: 
prediction['''prediction_text'''] for prediction in predictions} lowerCamelCase_ =[ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase ) return score
codestyle: 6
label: 1
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def a_ ( __snake_case : str , __snake_case : str ) -> str | Literal[False]: """simple docstring""" lowerCamelCase_ =list(__snake_case ) lowerCamelCase_ =list(__snake_case ) lowerCamelCase_ =0 for i in range(len(__snake_case ) ): if lista[i] != lista[i]: count += 1 lowerCamelCase_ ='''_''' if count > 1: return False else: return "".join(__snake_case ) def a_ ( __snake_case : list[str] ) -> list[str]: """simple docstring""" lowerCamelCase_ =[] while True: lowerCamelCase_ =['''$'''] * len(__snake_case ) lowerCamelCase_ =[] for i in range(len(__snake_case ) ): for j in range(i + 1 , len(__snake_case ) ): lowerCamelCase_ =compare_string(binary[i] , binary[j] ) if k is False: lowerCamelCase_ ='''*''' lowerCamelCase_ ='''*''' temp.append('''X''' ) for i in range(len(__snake_case ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__snake_case ) == 0: return pi lowerCamelCase_ =list(set(__snake_case ) ) def a_ ( __snake_case : int , __snake_case : Sequence[float] ) -> list[str]: """simple docstring""" lowerCamelCase_ =[] for minterm in minterms: lowerCamelCase_ ='''''' for _ in range(__snake_case ): lowerCamelCase_ =str(minterm % 2 ) + string minterm //= 2 temp.append(__snake_case ) return temp def a_ ( __snake_case : str , __snake_case : str , __snake_case : int ) -> bool: """simple docstring""" lowerCamelCase_ =list(__snake_case ) lowerCamelCase_ =list(__snake_case ) lowerCamelCase_ =0 for i in range(len(__snake_case ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def a_ ( __snake_case : list[list[int]] , __snake_case : list[str] ) -> list[str]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =[0] * len(__snake_case ) for i in range(len(chart[0] ) ): lowerCamelCase_ =0 lowerCamelCase_ =-1 for j in range(len(__snake_case ) ): if chart[j][i] == 1: count += 1 lowerCamelCase_ =j if count == 1: lowerCamelCase_ =1 for i in range(len(__snake_case ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__snake_case ) ): lowerCamelCase_ =0 temp.append(prime_implicants[i] ) while True: lowerCamelCase_ =0 lowerCamelCase_ =-1 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): lowerCamelCase_ =chart[i].count(1 ) if count_n > max_n: lowerCamelCase_ =count_n lowerCamelCase_ =i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__snake_case ) ): lowerCamelCase_ =0 def a_ ( __snake_case : list[str] , __snake_case : list[str] ) -> list[list[int]]: """simple docstring""" lowerCamelCase_ =[[0 for x in range(len(__snake_case ) )] for x in range(len(__snake_case ) )] for i in range(len(__snake_case ) ): lowerCamelCase_ =prime_implicants[i].count('''_''' ) for j in range(len(__snake_case ) ): if is_for_table(prime_implicants[i] , binary[j] , __snake_case ): lowerCamelCase_ =1 return chart def a_ ( ) -> None: """simple docstring""" lowerCamelCase_ =int(input('''Enter the no. 
of variables\n''' ) ) lowerCamelCase_ =[ float(__snake_case ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] lowerCamelCase_ =decimal_to_binary(__snake_case , __snake_case ) lowerCamelCase_ =check(__snake_case ) print('''Prime Implicants are:''' ) print(__snake_case ) lowerCamelCase_ =prime_implicant_chart(__snake_case , __snake_case ) lowerCamelCase_ =selection(__snake_case , __snake_case ) print('''Essential Prime Implicants are:''' ) print(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod() main()
codestyle: 6
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a_ : Tuple = logging.get_logger(__name__) a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : Tuple = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : Union[str, Any] = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : int = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } a_ : List[Any] = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } a_ : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } a_ : Optional[int] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : Dict = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =VOCAB_FILES_NAMES lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : Dict =DPRContextEncoderTokenizer class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : List[Any] =DPRQuestionEncoderTokenizer a_ : Union[str, Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) a_ : Dict = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(lowerCamelCase__ ) class __UpperCamelCase : def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if titles is None and texts is None: return super().__call__( lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) elif titles is None or texts is None: lowerCamelCase_ =titles if texts is None else texts return super().__call__( lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles] lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'''There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.''' lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ ={ '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase ) ] } if return_attention_mask is not False: lowerCamelCase_ =[] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCamelCase_ =attention_mask return self.pad(lowerCAmelCase, 
padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ): """simple docstring""" lowerCamelCase_ =reader_input['''input_ids'''] lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ ) lowerCamelCase_ =[] for doc_id in sorted_docs: lowerCamelCase_ =list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase_ =sequence_ids.index(self.pad_token_id ) else: lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =[] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase ) lowerCamelCase_ =[] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' lowerCamelCase_ =end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : int =VOCAB_FILES_NAMES lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION lowercase : int =['input_ids', 'attention_mask'] lowercase : Dict =DPRReaderTokenizer
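A minimal end-to-end sketch of the reader tokenizer defined above, following the standard DPR usage (the checkpoint name is the released facebook/dpr-reader-single-nq-base; `decode_best_spans` is the span-decoding method sketched in the class):

from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
# ranks passages by relevance_logits, then extracts non-overlapping top spans
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)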
6
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : Optional[Any] = { """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] ='wav2vec2' def __init__( self, lowerCAmelCase=32, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-5, lowerCAmelCase="group", lowerCAmelCase="gelu", lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512), lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2), lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2), lowerCAmelCase=False, lowerCAmelCase=128, lowerCAmelCase=16, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=0.0_5, lowerCAmelCase=10, lowerCAmelCase=2, lowerCAmelCase=0.0, lowerCAmelCase=10, lowerCAmelCase=0, lowerCAmelCase=320, lowerCAmelCase=2, lowerCAmelCase=0.1, lowerCAmelCase=100, lowerCAmelCase=256, lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase="sum", lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=256, lowerCAmelCase=(512, 512, 512, 512, 1_500), lowerCAmelCase=(5, 3, 3, 1, 1), lowerCAmelCase=(1, 2, 3, 1, 1), lowerCAmelCase=512, lowerCAmelCase=0, lowerCAmelCase=1, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=3, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase, pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase ) lowerCamelCase_ =hidden_size lowerCamelCase_ =feat_extract_norm lowerCamelCase_ =feat_extract_activation lowerCamelCase_ =list(lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) lowerCamelCase_ =conv_bias lowerCamelCase_ =num_conv_pos_embeddings lowerCamelCase_ =num_conv_pos_embedding_groups lowerCamelCase_ =len(self.conv_dim ) lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =feat_proj_dropout lowerCamelCase_ =final_dropout lowerCamelCase_ =layerdrop lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =initializer_range lowerCamelCase_ =vocab_size lowerCamelCase_ =do_stable_layer_norm lowerCamelCase_ =use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase_ =apply_spec_augment lowerCamelCase_ =mask_time_prob lowerCamelCase_ =mask_time_length lowerCamelCase_ =mask_time_min_masks lowerCamelCase_ =mask_feature_prob lowerCamelCase_ =mask_feature_length lowerCamelCase_ =mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCamelCase_ =num_codevectors_per_group lowerCamelCase_ =num_codevector_groups lowerCamelCase_ =contrastive_logits_temperature lowerCamelCase_ =feat_quantizer_dropout lowerCamelCase_ =num_negatives lowerCamelCase_ =codevector_dim lowerCamelCase_ =proj_codevector_dim lowerCamelCase_ =diversity_loss_weight # ctc loss lowerCamelCase_ =ctc_loss_reduction lowerCamelCase_ =ctc_zero_infinity # adapter lowerCamelCase_ =add_adapter lowerCamelCase_ =adapter_kernel_size lowerCamelCase_ =adapter_stride lowerCamelCase_ =num_adapter_layers lowerCamelCase_ =output_hidden_size or hidden_size lowerCamelCase_ =adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCamelCase_ =classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCamelCase_ =list(lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) lowerCamelCase_ =list(lowerCAmelCase ) lowerCamelCase_ =xvector_output_dim @property def lowercase__ ( self ): """simple docstring""" return functools.reduce(operator.mul, self.conv_stride, 1 )
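The `inputs_to_logits_ratio` property at the end reduces the conv strides with a product; with the default strides this is easy to sanity-check against the released config class:

from transformers import Wav2Vec2Config

config = Wav2Vec2Config()               # default strides (5, 2, 2, 2, 2, 2, 2) as above
print(config.inputs_to_logits_ratio)    # 320 == 5 * 2**6: one logit per 320 waveform samples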
6
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    """Build a tiny three-file dataset in which the first two entries are near-duplicates."""
    dict_data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dict_data)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
6
1
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
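A quick smoke test of the helper, listing the primes below 50:

print([candidate for candidate in range(50) if is_prime(candidate)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]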
6
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a_ : Any = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
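For context, a typical TrOCR inference loop built on the classes this module lazily exposes (the checkpoint is the released microsoft/trocr-base-handwritten; the image path is illustrative):

from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")

image = Image.open("handwritten_line.png").convert("RGB")   # any single text-line crop
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])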
6
1
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check that no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking on conflicts; full boards are printed and recorded."""
    if row >= len(board):
        # note: the same (mutable) board object is appended each time, so only
        # len(solution) is meaningful after the search finishes
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Pretty-print the board with Q for queens and . for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens: "))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions is:", len(solution))
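A sanity check against the well-known solution counts (2 boards for n=4, 92 for n=8):

n = 4
board = [[0 for i in range(n)] for j in range(n)]
solution.clear()
solve(board, 0)
print(len(solution))   # 2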
6
from collections import defaultdict
from math import gcd


def solution(limit: int = 1500000) -> int:
    """Count perimeters up to `limit` that admit exactly one integer right triangle.

    Primitive Pythagorean triples are generated with Euclid's formula from coprime
    m > n of opposite parity; every multiple of each primitive perimeter is tallied.
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
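Euclid's formula worked for the smallest case, to make the perimeter expression above concrete:

# coprime m > n of opposite parity give a = m*m - n*n, b = 2*m*n, c = m*m + n*n,
# so the primitive perimeter is a + b + c = 2*m*(m + n)
m, n = 2, 1
print(m * m - n * n, 2 * m * n, m * m + n * n)   # 3 4 5, perimeter 12 == 2*2*(2+1)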
6
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available a_ : str = { """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ """GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoForCausalLM""", """GPTNeoForQuestionAnswering""", """GPTNeoForSequenceClassification""", """GPTNeoForTokenClassification""", """GPTNeoModel""", """GPTNeoPreTrainedModel""", """load_tf_weights_in_gpt_neo""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Any = [ """FlaxGPTNeoForCausalLM""", """FlaxGPTNeoModel""", """FlaxGPTNeoPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
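A typical generation call against the smallest released checkpoint (GPT-Neo reuses the GPT-2 tokenizer):

from transformers import GPT2Tokenizer, GPTNeoForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-125M")
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M")

input_ids = tokenizer("Hello, my name is", return_tensors="pt").input_ids
output_ids = model.generate(input_ids, max_length=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))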
6
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ : Tuple = 16 a_ : Optional[int] = 32 def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__snake_case : int ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase_ =datasets.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase_ =16 elif accelerator.mixed_precision != "no": lowerCamelCase_ =8 else: lowerCamelCase_ =None return tokenizer.pad( __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCamelCase_ =DataLoader( tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) lowerCamelCase_ =DataLoader( tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ : Tuple = mocked_dataloaders # noqa: F811 def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1": lowerCamelCase_ =2 # Initialize accelerator lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase_ =config['''lr'''] lowerCamelCase_ =int(config['''num_epochs'''] ) lowerCamelCase_ =int(config['''seed'''] ) lowerCamelCase_ =int(config['''batch_size'''] ) lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__snake_case ) def inner_training_loop(__snake_case : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase_ =model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case ) lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case ) # Instantiate scheduler lowerCamelCase_ =get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.loss accelerator.backward(__snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.logits.argmax(dim=-1 ) lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__snake_case , references=__snake_case , ) lowerCamelCase_ =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __snake_case ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowerCamelCase_ =parser.parse_args() lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
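The decorator pattern at the heart of this script, isolated (the body is a stub):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    # build dataloaders and train with `batch_size`; on a CUDA out-of-memory
    # error the decorator frees memory, halves batch_size and retries
    print(f"attempting batch_size={batch_size}")

training_loop()   # invoked with no arguments, exactly like inner_training_loop() above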
6
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ : List[Any] = logging.get_logger(__name__) a_ : Tuple = { """google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""", # See all ViT models at https://huggingface.co/models?filter=vit } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] ='vit' def __init__( self, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=224, lowerCAmelCase=16, lowerCAmelCase=3, lowerCAmelCase=True, lowerCAmelCase=16, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =image_size lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =qkv_bias lowerCamelCase_ =encoder_stride class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Tuple =version.parse('1.11' ) @property def lowercase__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self ): """simple docstring""" return 1e-4
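With the defaults above (224-pixel images, 16-pixel patches), the sequence length behind the ONNX axes is easy to verify:

from transformers import ViTConfig

config = ViTConfig()
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)   # 196 patch tokens, plus one [CLS] token -> 197 positions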
6
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py a_ : List[str] = """src/diffusers""" # Matches is_xxx_available() a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") a_ : Optional[Any] = """ {0} = None """ a_ : List[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ a_ : Optional[Any] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def a_ ( __snake_case : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =_re_backend.findall(__snake_case ) if len(__snake_case ) == 0: return None return "_and_".join(__snake_case ) def a_ ( ) -> Optional[int]: """simple docstring""" with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase_ =0 lowerCamelCase_ ={} # Go through the end of the file while line_index < len(__snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase_ =find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 lowerCamelCase_ =[] # Until we unindent, add backend objects to the list while line_index < len(__snake_case ) and len(lines[line_index] ) > 1: lowerCamelCase_ =lines[line_index] lowerCamelCase_ =_re_single_line_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__snake_case ) > 0: lowerCamelCase_ =objects else: line_index += 1 return backend_specific_objects def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(__snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(__snake_case , __snake_case ) else: return DUMMY_CLASS.format(__snake_case , __snake_case ) def a_ ( __snake_case : Tuple=None ) -> List[str]: """simple docstring""" if backend_specific_objects is None: lowerCamelCase_ =read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase_ ={} for backend, objects in backend_specific_objects.items(): lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']''' lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] ) lowerCamelCase_ =dummy_file return dummy_files def a_ ( __snake_case : Dict=False ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase_ ={'''torch''': '''pt'''} # Locate actual dummy modules and read their 
content. lowerCamelCase_ =os.path.join(__snake_case , '''utils''' ) lowerCamelCase_ ={ backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase_ ={} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.read() else: lowerCamelCase_ ='''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main ''' '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` ''' '''to fix this.''' ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a_ : Tuple = parser.parse_args() check_dummies(args.fix_and_overwrite)
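What a generated placeholder looks like, using the class template defined at the top of the script (bound to an obfuscated name above, `DUMMY_CLASS` upstream):

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""
print(DUMMY_CLASS.format("UNet2DModel", '["torch"]'))
# instantiating the printed class without torch installed raises a clear
# error via requires_backends instead of a confusing ImportError elsewhere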
6
1
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def max_subarray(
    arr: Sequence[float], low: int, high: int
) -> tuple[int | None, int | None, float]:
    """Return (start, end, sum) of the maximum subarray of arr[low:high + 1], divide and conquer."""
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(
    arr: Sequence[float], low: int, mid: int, high: int
) -> tuple[int, int, float]:
    """Return the best subarray that crosses the midpoint, as (start, end, sum)."""
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1

    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    """Time max_subarray on a random array of the given size."""
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    """Benchmark a range of input sizes and plot runtime against size."""
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken")
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, "\t\t", runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel("Number of Inputs")
    plt.ylabel("Time taken in seconds")
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
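The classic CLRS stock-delta example, as a quick check of the recursion:

arr = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
print(max_subarray(arr, 0, len(arr) - 1))   # (7, 10, 43): 18 + 20 - 7 + 12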
6
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89, with the chain member 58 being the one which, when declared first,
# gives the least number of iterations for all the members to be checked;
# the other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True    # 1 ends at 1
CHAINS[57] = False  # 58 ends at 89

def chain(number: int) -> bool:
    """Return True if `number`'s chain arrives at 1, False if it arrives at 89, memoizing as it goes."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10000000) -> int:
    """Count starting values below `number` whose chain arrives at 89 (Project Euler 92)."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
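The chain logic traced by hand for one starting value:

n = 44
seen = [n]
while n not in (1, 89):
    n = sum(int(d) ** 2 for d in str(n))
    seen.append(n)
print(seen)   # [44, 32, 13, 10, 1]: 44 is a "happy" start that ends at 1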
6
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : str =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] ) class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : Any =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] )
6
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def a_ ( __snake_case : Tuple ) -> str: """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class __UpperCamelCase ( lowerCamelCase__ ): @staticmethod def lowercase__ ( lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =parser.add_parser('''download''' ) download_parser.add_argument( '''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' ) download_parser.add_argument( '''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' ) download_parser.add_argument( '''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', ) download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' ) download_parser.set_defaults(func=lowerCAmelCase ) def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =model lowerCamelCase_ =cache lowerCamelCase_ =force lowerCamelCase_ =trust_remote_code def lowercase__ ( self ): """simple docstring""" from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
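The command registered above is invoked as, e.g., `transformers-cli download --force bert-base-cased`; its `run` method boils down to roughly this:

from transformers import AutoModel, AutoTokenizer

AutoModel.from_pretrained("bert-base-cased", force_download=True)
AutoTokenizer.from_pretrained("bert-base-cased", force_download=True)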
6
1
'''simple docstring''' import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging a_ : Optional[Any] = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""] a_ : Optional[int] = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse("""0.9.0"""): raise Exception("""requires fairseq >= 0.9.0""") logging.set_verbosity_info() a_ : Union[str, Any] = logging.get_logger(__name__) a_ : List[str] = """ Hello world! cécé herlolip""" a_ : int = [ ("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""), ("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""), ("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""), ("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""), ] def a_ ( __snake_case : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Any ) -> List[str]: """simple docstring""" lowerCamelCase_ =dct.pop(__snake_case ) lowerCamelCase_ =val def a_ ( __snake_case : Dict ) -> List[Any]: """simple docstring""" lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) lowerCamelCase_ =torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def a_ ( __snake_case : Union[str, Any] ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer @torch.no_grad() def a_ ( __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Tuple=None ) -> List[Any]: """simple docstring""" if not os.path.exists(__snake_case ): lowerCamelCase_ =torch.hub.load('''pytorch/fairseq''' , __snake_case ).eval() else: lowerCamelCase_ =load_xsum_checkpoint(__snake_case ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCamelCase_ =checkpoint_path.replace('''.''' , '''-''' ) lowerCamelCase_ =BartConfig.from_pretrained(__snake_case ) lowerCamelCase_ =bart.encode(__snake_case ).unsqueeze(0 ) lowerCamelCase_ =BartTokenizer.from_pretrained(__snake_case ).encode(__snake_case , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(__snake_case , __snake_case ).all(): raise ValueError( F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": lowerCamelCase_ =bart.state_dict() remove_ignore_keys_(__snake_case ) lowerCamelCase_ =state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) lowerCamelCase_ =BartForSequenceClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) lowerCamelCase_ =bart.predict('''mnli''' , __snake_case , 
return_logits=__snake_case ) lowerCamelCase_ =model(__snake_case )[0] # logits else: # no classification heads to worry about lowerCamelCase_ =bart.model.state_dict() remove_ignore_keys_(__snake_case ) lowerCamelCase_ =state_dict['''decoder.embed_tokens.weight'''] lowerCamelCase_ =bart.extract_features(__snake_case ) if hf_checkpoint_name == "facebook/bart-large": lowerCamelCase_ =BartModel(__snake_case ).eval() model.load_state_dict(__snake_case ) lowerCamelCase_ =model(__snake_case ).model[0] else: lowerCamelCase_ =BartForConditionalGeneration(__snake_case ).eval() # an existing summarization ckpt model.model.load_state_dict(__snake_case ) if hasattr(__snake_case , '''lm_head''' ): lowerCamelCase_ =make_linear_from_emb(model.model.shared ) lowerCamelCase_ =model.model(__snake_case )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) model.save_pretrained(__snake_case ) if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem.""" ) parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum""" ) a_ : List[str] = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
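The `make_linear_from_emb` helper above (obfuscated to `a_` here) ties the LM head to the embedding matrix; in isolation the trick looks like this (BART-large's sizes are used for illustration):

import torch
from torch import nn

emb = nn.Embedding(50265, 1024)          # vocab_size x d_model
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(vocab_size, emb_size, bias=False)
lm_head.weight.data = emb.weight.data    # share storage: no extra parameters
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()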
6
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features a_ : List[str] = logging.get_logger(__name__) a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} ) lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowercase : int =field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowercase : int =field( default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) lowercase : int =field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) lowercase : int =field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ) } , ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowercase : float =field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='train' lowercase : Any ='dev' class __UpperCamelCase ( lowerCamelCase__ ): lowercase : SquadDataTrainingArguments lowercase : List[SquadFeatures] lowercase : Split lowercase : bool def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ): """simple docstring""" lowerCamelCase_ =args lowerCamelCase_ =is_language_sensitive lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowerCAmelCase, lowerCAmelCase ): try: lowerCamelCase_ =Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) lowerCamelCase_ =mode # Load data features from cache or dataset file lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1''' lowerCamelCase_ =os.path.join( 
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase_ =cached_features_file + '''.lock''' with FileLock(lowerCAmelCase ): if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache: lowerCamelCase_ =time.time() lowerCamelCase_ =torch.load(lowerCAmelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase_ =self.old_features['''features'''] lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase ) lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ''' future run''' ) else: if mode == Split.dev: lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase_ =self.processor.get_train_examples(args.data_dir ) lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features( examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, ) lowerCamelCase_ =time.time() torch.save( {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.features[i] lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float ) lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': attention_mask, '''token_type_ids''': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} ) if self.args.version_2_with_negative: inputs.update({'''is_impossible''': is_impossible} ) if self.is_language_sensitive: inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} ) return inputs
6
1
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Evaluate at x0 the polynomial through (x_points, y_points) via Neville's algorithm.

    Returns [interpolated value, full tableau of intermediate values].
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
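Since Neville's scheme reproduces the data's polynomial exactly, linear data gives an easy check:

# y = x + 5 sampled at x = 1, 2, 3, 4, 6; interpolating at 5 returns 10.0
value, tableau = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
print(value)   # 10.0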
6
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : Any = logging.get_logger(__name__) a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/""" a_ : Any = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def a_ ( __snake_case : int ) -> Any: """simple docstring""" if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowerCamelCase_ =key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ ={} import re lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case ) elif re_encoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case ) elif re_encoder_block_proj_out.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ 
=re_decoder_block_conv_out.sub(__snake_case , __snake_case ) elif re_decoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case ) elif re_decoder_block_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case ) elif re_prior_cond_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case ) elif re_prior_cond_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case ) # keep original key else: lowerCamelCase_ =original_key lowerCamelCase_ =replace_key(__snake_case ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) lowerCamelCase_ =original_key lowerCamelCase_ =original_key lowerCamelCase_ =value return new_dict @torch.no_grad() def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case ) open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content ) lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]] lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case ) lowerCamelCase_ =JukeboxModel(__snake_case ) lowerCamelCase_ =[] lowerCamelCase_ ={} for i, dict_name 
in enumerate(__snake_case ): lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model'''] lowerCamelCase_ ={} for k in old_dic.keys(): if k.endswith('''.b''' ): lowerCamelCase_ =old_dic[k] elif k.endswith('''.w''' ): lowerCamelCase_ =old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowerCamelCase_ =old_dic[k] else: lowerCamelCase_ =old_dic[k] lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}''' lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case ) weight_dict.append(__snake_case ) lowerCamelCase_ =weight_dict.pop(0 ) model.vqvae.load_state_dict(__snake_case ) for i in range(len(__snake_case ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile: json.dump(__snake_case , __snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) return weight_dict if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) a_ : Optional[int] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
6
1
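# A minimal, self-contained sketch of the regex-based key renaming used by
# conversion scripts like the one above: match the old key layout, recompute
# the indices, and emit the new key. The pattern and the target layout below
# are illustrative assumptions, not the actual Jukebox module names.
import re

RENAME_PATTERN = re.compile(r"model\.(\d+)\.resnet\.(\d+)\.(\w+)")


def rename_keys(state_dict: dict) -> dict:
    renamed = {}
    for key, value in state_dict.items():
        match = RENAME_PATTERN.fullmatch(key)
        if match is None:
            renamed[key] = value  # keep keys that need no renaming
            continue
        block, layer, suffix = match.groups()
        # Collapse the two indices into one flat index, mirroring the
        # `int(groups[1]) * 2 + int(groups[2]) - 2` arithmetic above.
        flat_index = int(block) * 2 + int(layer) - 2
        renamed[f"decoder.blocks.{flat_index}.{suffix}"] = value
    return renamed


assert rename_keys({"model.1.resnet.3.weight": 0}) == {"decoder.blocks.3.weight": 0}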
'''simple docstring''' import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-canny''', from_pt=lowerCAmelCase, dtype=jnp.bfloataa ) lowerCamelCase_, lowerCamelCase_ =FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''', controlnet=lowerCAmelCase, from_pt=lowerCAmelCase, dtype=jnp.bfloataa ) lowerCamelCase_ =controlnet_params lowerCamelCase_ ='''bird''' lowerCamelCase_ =jax.device_count() lowerCamelCase_ =pipe.prepare_text_inputs([prompts] * num_samples ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ) lowerCamelCase_ =pipe.prepare_image_inputs([canny_image] * num_samples ) lowerCamelCase_ =jax.random.PRNGKey(0 ) lowerCamelCase_ =jax.random.split(lowerCAmelCase, jax.device_count() ) lowerCamelCase_ =replicate(lowerCAmelCase ) lowerCamelCase_ =shard(lowerCAmelCase ) lowerCamelCase_ =shard(lowerCAmelCase ) lowerCamelCase_ =pipe( prompt_ids=lowerCAmelCase, image=lowerCAmelCase, params=lowerCAmelCase, prng_seed=lowerCAmelCase, num_inference_steps=50, jit=lowerCAmelCase, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) lowerCamelCase_ =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCamelCase_ =images[0, 253:256, 253:256, -1] lowerCamelCase_ =jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCamelCase_ =jnp.array( [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =FlaxControlNetModel.from_pretrained( '''lllyasviel/sd-controlnet-openpose''', from_pt=lowerCAmelCase, dtype=jnp.bfloataa ) lowerCamelCase_, lowerCamelCase_ =FlaxStableDiffusionControlNetPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''', controlnet=lowerCAmelCase, from_pt=lowerCAmelCase, dtype=jnp.bfloataa ) lowerCamelCase_ =controlnet_params lowerCamelCase_ ='''Chef in the kitchen''' lowerCamelCase_ =jax.device_count() lowerCamelCase_ =pipe.prepare_text_inputs([prompts] * num_samples ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' ) lowerCamelCase_ =pipe.prepare_image_inputs([pose_image] * num_samples ) lowerCamelCase_ =jax.random.PRNGKey(0 ) lowerCamelCase_ =jax.random.split(lowerCAmelCase, jax.device_count() ) lowerCamelCase_ =replicate(lowerCAmelCase ) lowerCamelCase_ =shard(lowerCAmelCase ) lowerCamelCase_ =shard(lowerCAmelCase ) lowerCamelCase_ =pipe( prompt_ids=lowerCAmelCase, image=lowerCAmelCase, params=lowerCAmelCase, prng_seed=lowerCAmelCase, num_inference_steps=50, jit=lowerCAmelCase, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) 
lowerCamelCase_ =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowerCamelCase_ =images[0, 253:256, 253:256, -1] lowerCamelCase_ =jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowerCamelCase_ =jnp.array( [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
6
'''simple docstring'''


def a_ ( __snake_case : int = 1000 ) -> int:
    """simple docstring"""
    lowerCamelCase_, lowerCamelCase_ =1, 1
    lowerCamelCase_ =2
    while True:
        lowerCamelCase_ =0
        lowerCamelCase_ =fa + fa
        lowerCamelCase_, lowerCamelCase_ =fa, f
        index += 1
        # count digits of the current Fibonacci number `f`, not of the input `n`
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
6
1
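# An unobfuscated restatement of the digit-counting Fibonacci search above
# (Project Euler 25), assuming the mangled names stand for consecutive
# Fibonacci terms: find the index of the first Fibonacci number with n digits.
def first_fibonacci_with_n_digits(n: int = 1000) -> int:
    fib_prev, fib_curr = 1, 1
    index = 2
    while len(str(fib_curr)) < n:
        fib_prev, fib_curr = fib_curr, fib_prev + fib_curr
        index += 1
    return index


# F(12) = 144 is the first Fibonacci number with three digits.
assert first_fibonacci_with_n_digits(3) == 12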
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ : Tuple = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Union[str, Any] = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys a_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(""".""") def a_ ( __snake_case : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ =test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F'''{test_file} instead.''' ) lowerCamelCase_ =components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )] lowerCamelCase_ ='''.'''.join(__snake_case ) return test_module_path def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =get_module_path(__snake_case ) lowerCamelCase_ =importlib.import_module(__snake_case ) return test_module def a_ ( __snake_case : Dict ) -> Tuple: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(__snake_case , __snake_case ) ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): lowerCamelCase_ =getattr(__snake_case , __snake_case ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] ) if len(__snake_case ) > 0: test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =test_class() if hasattr(__snake_case , '''setUp''' ): test.setUp() lowerCamelCase_ =None if hasattr(__snake_case , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: lowerCamelCase_ =test.model_tester.__class__ return model_tester def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =[] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case ) lowerCamelCase_ =[] for test_class in test_classes: lowerCamelCase_ =get_model_tester_from_test_class(__snake_case ) if tester_class is not None: tester_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Tuple ) -> Tuple: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes} return test_tester_mapping def a_ ( __snake_case : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_test_mapping def a_ ( __snake_case : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_to_tester_mapping def a_ ( __snake_case : List[str] ) -> List[Any]: """simple docstring""" if isinstance(__snake_case , __snake_case ): return o elif isinstance(__snake_case , __snake_case ): return o.__name__ elif isinstance(__snake_case , (list, tuple) ): return [to_json(__snake_case ) for x in o] elif isinstance(__snake_case , __snake_case ): return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()} else: return o
6
1
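# A toy stand-in for the `_LazyModule` pattern used by the import file above
# (not the actual transformers implementation): attributes resolve to their
# defining submodule only on first access, keeping package import cheap.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        mapping = self.__dict__.get("_attr_to_module", {})
        if attr not in mapping:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{mapping[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


# In a package __init__.py this would typically be installed as:
#   sys.modules[__name__] = LazyModule(__name__, _import_structure)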
'''simple docstring''' import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup a_ : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''bs4'''] ) super().__init__(**lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag lowerCamelCase_ =parent.find_all(child.name, recursive=lowerCAmelCase ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(lowerCAmelCase ) else next(i for i, s in enumerate(lowerCAmelCase, 1 ) if s is child ) ) lowerCamelCase_ =parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =BeautifulSoup(lowerCAmelCase, '''html.parser''' ) lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =[] for element in html_code.descendants: if type(lowerCAmelCase ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue lowerCamelCase_ =html.unescape(lowerCAmelCase ).strip() if not text_in_this_tag: continue all_doc_strings.append(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =self.xpath_soup(lowerCAmelCase ) stringaxtag_seq.append(lowerCAmelCase ) stringaxsubs_seq.append(lowerCAmelCase ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ='''''' for tagname, subs in zip(lowerCAmelCase, lowerCAmelCase ): xpath += f'''/{tagname}''' if subs != 0: xpath += f'''[{subs}]''' return xpath def __call__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =False # Check that strings has a valid type if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =True elif isinstance(lowerCAmelCase, (list, tuple) ): if len(lowerCAmelCase ) == 0 or isinstance(html_strings[0], lowerCAmelCase ): lowerCamelCase_ =True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f'''but is of type {type(lowerCAmelCase )}.''' ) lowerCamelCase_ =bool(isinstance(lowerCAmelCase, (list, tuple) ) and (isinstance(html_strings[0], lowerCAmelCase )) ) if not is_batched: lowerCamelCase_ =[html_strings] # Get nodes + xpaths lowerCamelCase_ =[] lowerCamelCase_ =[] for html_string in html_strings: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =self.get_three_from_single(lowerCAmelCase ) nodes.append(lowerCAmelCase ) lowerCamelCase_ =[] for node, tag_list, sub_list in zip(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =self.construct_xpath(lowerCAmelCase, lowerCAmelCase ) xpath_strings.append(lowerCAmelCase ) xpaths.append(lowerCAmelCase ) # return as Dict lowerCamelCase_ ={'''nodes''': nodes, '''xpaths''': xpaths} lowerCamelCase_ =BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase ) return encoded_inputs
6
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : str =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] ) class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : Any =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] )
6
1
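# A standalone sketch of the XPath construction in the feature extractor
# above: walk from a node up through its parents, recording each tag name and
# its 1-based position among same-named siblings. Slightly simplified
# convention (unique children get no subscript); requires beautifulsoup4.
from bs4 import BeautifulSoup


def xpath_for(element) -> str:
    parts = []
    child = element if element.name else element.parent
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        index = next(i for i, s in enumerate(siblings, 1) if s is child)
        parts.append(child.name if len(siblings) == 1 else f"{child.name}[{index}]")
        child = parent
    return "/" + "/".join(reversed(parts))


soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
print(xpath_for(soup.find_all("p")[1]))  # -> /html/body/p[2]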
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __UpperCamelCase : @staticmethod def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" pass @is_pipeline_test @require_vision @require_timm @require_torch class __UpperCamelCase ( unittest.TestCase ): lowercase : Dict =MODEL_FOR_OBJECT_DETECTION_MAPPING def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =ObjectDetectionPipeline(model=lowerCAmelCase, image_processor=lowerCAmelCase ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''', threshold=0.0 ) self.assertGreater(len(lowerCAmelCase ), 0 ) for detected_object in outputs: self.assertEqual( lowerCAmelCase, { '''score''': ANY(lowerCAmelCase ), '''label''': ANY(lowerCAmelCase ), '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )}, }, ) import datasets lowerCamelCase_ =datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''' ) lowerCamelCase_ =[ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] lowerCamelCase_ =object_detector(lowerCAmelCase, threshold=0.0 ) self.assertEqual(len(lowerCAmelCase ), len(lowerCAmelCase ) ) for outputs in batch_outputs: self.assertGreater(len(lowerCAmelCase ), 0 ) for detected_object in outputs: self.assertEqual( lowerCAmelCase, { '''score''': ANY(lowerCAmelCase ), '''label''': ANY(lowerCAmelCase ), '''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )}, }, ) @require_tf @unittest.skip('''Object detection not implemented in TF''' ) def lowercase__ ( self ): """simple docstring""" pass @require_torch def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''hf-internal-testing/tiny-detr-mobilenetsv3''' lowerCamelCase_ =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =ObjectDetectionPipeline(model=lowerCAmelCase, feature_extractor=lowerCAmelCase ) lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=0.0 ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], ) lowerCamelCase_ =object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ], 
threshold=0.0, ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], [ {'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, {'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}}, ], ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''facebook/detr-resnet-50''' lowerCamelCase_ =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =ObjectDetectionPipeline(model=lowerCAmelCase, feature_extractor=lowerCAmelCase ) lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ) lowerCamelCase_ =object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''facebook/detr-resnet-50''' lowerCamelCase_ =pipeline('''object-detection''', 
model=lowerCAmelCase ) lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ) lowerCamelCase_ =object_detector( [ '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''http://images.cocodataset.org/val2017/000000039769.jpg''', ] ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ [ {'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], [ {'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}}, {'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}}, {'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}}, {'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ], ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =0.9_9_8_5 lowerCamelCase_ ='''facebook/detr-resnet-50''' lowerCamelCase_ =pipeline('''object-detection''', model=lowerCAmelCase ) lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=lowerCAmelCase ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ {'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}}, {'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}}, ], ) @require_torch @require_pytesseract @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''Narsil/layoutlmv3-finetuned-funsd''' lowerCamelCase_ =0.9_9_9_3 lowerCamelCase_ =pipeline('''object-detection''', model=lowerCAmelCase, threshold=lowerCAmelCase ) lowerCamelCase_ =object_detector( '''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' ) self.assertEqual( nested_simplify(lowerCAmelCase, decimals=4 ), [ 
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, {'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}}, ], )
6
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] =['image_processor', 'tokenizer'] lowercase : Optional[int] ='AutoImageProcessor' lowercase : List[str] ='AutoTokenizer' def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =self.image_processor lowerCamelCase_ =False def __call__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase ) if len(lowerCAmelCase ) > 0: lowerCamelCase_ =args[0] lowerCamelCase_ =args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase ) if text is not None: lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: lowerCamelCase_ =encodings['''input_ids'''] return inputs def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @contextmanager def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) lowerCamelCase_ =True lowerCamelCase_ =self.tokenizer yield lowerCamelCase_ =self.image_processor lowerCamelCase_ =False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ): """simple docstring""" if added_vocab is None: lowerCamelCase_ =self.tokenizer.get_added_vocab() lowerCamelCase_ ={} while tokens: lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE ) if start_token is None: break lowerCamelCase_ =start_token.group(1 ) lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE ) lowerCamelCase_ =start_token.group() if end_token is None: lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' ) else: lowerCamelCase_ =end_token.group() lowerCamelCase_ =re.escape(lowerCAmelCase ) lowerCamelCase_ =re.escape(lowerCAmelCase ) lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE ) if content is not None: lowerCamelCase_ =content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase ) if value: if len(lowerCAmelCase ) == 1: lowerCamelCase_ =value[0] lowerCamelCase_ =value else: # leaf nodes lowerCamelCase_ =[] for leaf in content.split(R'''<sep/>''' ): lowerCamelCase_ =leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": lowerCamelCase_ =leaf[1:-2] # for categorical special tokens output[key].append(lowerCAmelCase ) if len(output[key] ) == 1: lowerCamelCase_ =output[key][0] lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase ) if len(lowerCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
6
1
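# A compact sketch of the idea behind the token-to-JSON parsing in the
# processor above: `<s_key>value</s_key>` spans become dictionary entries,
# recursing when a value itself contains tagged spans. List handling
# (`<sep/>`) and the added-vocabulary checks are omitted for brevity.
import re


def tags_to_dict(sequence: str) -> dict:
    output = {}
    for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", sequence, re.DOTALL):
        key, content = match.group(1), match.group(2).strip()
        output[key] = tags_to_dict(content) if "<s_" in content else content
    return output


print(tags_to_dict("<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"))
# {'menu': {'name': 'latte', 'price': '4.50'}}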
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Optional[Any] = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys a_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =ShapEImgaImgPipeline lowercase : Dict =['image'] lowercase : str =['image'] lowercase : int =[ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] lowercase : int =False @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self ): """simple docstring""" return 8 @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, ) lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =CLIPImageProcessor( crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, ) return image_processor @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowerCamelCase_ =PriorTransformer(**lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowerCamelCase_ =ShapERenderer(**lowerCAmelCase ) return model def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.dummy_prior lowerCamelCase_ =self.dummy_image_encoder lowerCamelCase_ =self.dummy_image_processor lowerCamelCase_ =self.dummy_renderer lowerCamelCase_ =HeunDiscreteScheduler( beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, ) lowerCamelCase_ ={ '''prior''': prior, '''image_encoder''': image_encoder, 
'''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) lowerCamelCase_ =output.images[0] lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowerCamelCase_ =np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase__ ( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =torch_device == '''cpu''' lowerCamelCase_ =True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =1 lowerCamelCase_ =2 lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) for key in inputs.keys(): if key in self.batch_params: lowerCamelCase_ =batch_size * [inputs[key]] lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowerCamelCase_ =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) lowerCamelCase_ =pipe( lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
6
1
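# The slice checks in the pipeline tests above follow one pattern: take a
# small corner of the output array and compare it elementwise against a
# recorded reference within a tolerance. One possible generic helper (the
# tolerance default is an arbitrary choice here):
import numpy as np


def assert_slice_close(images: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> None:
    # Compare the bottom-right 3x3 patch of the first image's last channel,
    # mirroring the checks in the tests above.
    actual = images[0, -3:, -3:, -1].flatten()
    max_diff = np.abs(actual - expected_slice).max()
    assert max_diff < atol, f"max difference {max_diff} exceeds tolerance {atol}"


assert_slice_close(np.zeros((1, 32, 32, 3)), np.zeros(9))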
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin a_ : str = """ Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] """ class __UpperCamelCase ( unittest.TestCase , lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_tool('''text-question-answering''' ) self.tool.setup() lowerCamelCase_ =load_tool('''text-question-answering''', remote=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.tool(lowerCAmelCase, '''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.remote_tool(lowerCAmelCase, '''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.tool(text=lowerCAmelCase, question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.remote_tool(text=lowerCAmelCase, question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' )
6
'''simple docstring''' from itertools import product def a_ ( __snake_case : int , __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =sides_number lowerCamelCase_ =max_face_number * dice_number lowerCamelCase_ =[0] * (max_total + 1) lowerCamelCase_ =1 lowerCamelCase_ =range(__snake_case , max_face_number + 1 ) for dice_numbers in product(__snake_case , repeat=__snake_case ): lowerCamelCase_ =sum(__snake_case ) totals_frequencies[total] += 1 return totals_frequencies def a_ ( ) -> float: """simple docstring""" lowerCamelCase_ =total_frequency_distribution( sides_number=4 , dice_number=9 ) lowerCamelCase_ =total_frequency_distribution( sides_number=6 , dice_number=6 ) lowerCamelCase_ =0 lowerCamelCase_ =9 lowerCamelCase_ =4 * 9 lowerCamelCase_ =6 for peter_total in range(__snake_case , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) lowerCamelCase_ =(4**9) * (6**6) lowerCamelCase_ =peter_wins_count / total_games_number lowerCamelCase_ =round(__snake_case , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
6
1
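# An unobfuscated restatement of the dice-frequency computation above
# (Project Euler 205): enumerate every roll with itertools.product and
# histogram the totals.
from itertools import product


def total_frequencies(sides: int, dice: int) -> list[int]:
    totals = [0] * (sides * dice + 1)
    for roll in product(range(1, sides + 1), repeat=dice):
        totals[sum(roll)] += 1
    return totals


# Two four-sided dice: totals 2..8 occur with frequencies 1, 2, 3, 4, 3, 2, 1.
assert total_frequencies(4, 2)[2:] == [1, 2, 3, 4, 3, 2, 1]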
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a_ : Tuple = logging.get_logger(__name__) a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : Tuple = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : Union[str, Any] = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : int = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } a_ : List[Any] = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } a_ : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } a_ : Optional[int] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : Dict = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =VOCAB_FILES_NAMES lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : Dict =DPRContextEncoderTokenizer class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : List[Any] =DPRQuestionEncoderTokenizer a_ : Union[str, Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) a_ : Dict = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(lowerCamelCase__ ) class __UpperCamelCase : def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if titles is None and texts is None: return super().__call__( lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) elif titles is None or texts is None: lowerCamelCase_ =titles if texts is None else texts return super().__call__( lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles] lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'''There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.''' lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ ={ '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase ) ] } if return_attention_mask is not False: lowerCamelCase_ =[] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCamelCase_ =attention_mask return self.pad(lowerCAmelCase, 
padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ): """simple docstring""" lowerCamelCase_ =reader_input['''input_ids'''] lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ ) lowerCamelCase_ =[] for doc_id in sorted_docs: lowerCamelCase_ =list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase_ =sequence_ids.index(self.pad_token_id ) else: lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =[] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase ) lowerCamelCase_ =[] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' lowerCamelCase_ =end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : int =VOCAB_FILES_NAMES lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION lowercase : int =['input_ids', 'attention_mask'] lowercase : Dict =DPRReaderTokenizer
6
'''simple docstring''' import os from typing import Dict, List, Tuple, TypeVar, Union a_ : Tuple = TypeVar("""T""") a_ : Dict = Union[List[T], Tuple[T, ...]] a_ : int = Union[T, List[T], Dict[str, T]] a_ : Optional[Any] = Union[str, bytes, os.PathLike]
6
1
'''simple docstring''' from typing import Any class __UpperCamelCase : def __init__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =data lowerCamelCase_ =None def __repr__( self ): """simple docstring""" return f'''Node({self.data})''' class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ =None def __iter__( self ): """simple docstring""" lowerCamelCase_ =self.head while node: yield node.data lowerCamelCase_ =node.next def __len__( self ): """simple docstring""" return sum(1 for _ in self ) def __repr__( self ): """simple docstring""" return "->".join([str(lowerCAmelCase ) for item in self] ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) lowerCamelCase_ =self.head for _ in range(lowerCAmelCase ): lowerCamelCase_ =current.next lowerCamelCase_ =data def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.insert_nth(len(self ), lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.insert_nth(0, lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) lowerCamelCase_ =Node(lowerCAmelCase ) if self.head is None: lowerCamelCase_ =new_node elif index == 0: lowerCamelCase_ =self.head # link new_node to head lowerCamelCase_ =new_node else: lowerCamelCase_ =self.head for _ in range(index - 1 ): lowerCamelCase_ =temp.next lowerCamelCase_ =temp.next lowerCamelCase_ =new_node def lowercase__ ( self ): # print every node data """simple docstring""" print(self ) def lowercase__ ( self ): """simple docstring""" return self.delete_nth(0 ) def lowercase__ ( self ): # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def lowercase__ ( self, lowerCAmelCase = 0 ): """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) lowerCamelCase_ =self.head # default first node if index == 0: lowerCamelCase_ =self.head.next else: lowerCamelCase_ =self.head for _ in range(index - 1 ): lowerCamelCase_ =temp.next lowerCamelCase_ =temp.next lowerCamelCase_ =temp.next.next return delete_node.data def lowercase__ ( self ): """simple docstring""" return self.head is None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =None lowerCamelCase_ =self.head while current: # Store the current node's next node. lowerCamelCase_ =current.next # Make the current node's next point backwards lowerCamelCase_ =prev # Make the previous node be the current node lowerCamelCase_ =current # Make the current node the next node (to progress iteration) lowerCamelCase_ =next_node # Return prev in order to put the head at the end lowerCamelCase_ =prev def a_ ( ) -> None: """simple docstring""" lowerCamelCase_ =LinkedList() assert linked_list.is_empty() is True assert str(__snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(__snake_case ) == i linked_list.insert_nth(__snake_case , i + 1 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(__snake_case ) == 9 assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): lowerCamelCase_ =-i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) ) def a_ ( ) -> None: """simple docstring""" lowerCamelCase_ =[ -9, 100, Node(7734_5112 ), '''dlrow olleH''', 7, 5555, 0, -1_9_2.5_5_5_5_5, '''Hello, world!''', 7_7.9, Node(10 ), None, None, 1_2.2_0, ] lowerCamelCase_ =LinkedList() for i in test_input: linked_list.insert_tail(__snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head lowerCamelCase_ =linked_list.delete_head() assert result == -9 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail lowerCamelCase_ =linked_list.delete_tail() assert result == 1_2.2 assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list lowerCamelCase_ =linked_list.delete_nth(10 ) assert result is None assert ( str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(__snake_case ) assert ( str(__snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(__snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def a_ ( ) -> Tuple: """simple docstring""" from doctest import testmod testmod() lowerCamelCase_ =LinkedList() linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked 
list:''' ) print(__snake_case ) print('''\nReading/changing Node data using indexing:''' ) print(F'''Element at Position 1: {linked_list[1]}''' ) linked_list[1] =input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(__snake_case ) print(F'''length of linked_list is: {len(__snake_case )}''' ) if __name__ == "__main__": main()
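# A compact, runnable restatement of the three-pointer reversal used by
# reverse() in the linked list sample above, with plain (data, next) tuples
# standing in for nodes; the function name and tuple encoding are illustrative.
def reverse_sketch(head):
    prev = None
    current = head
    while current is not None:
        data, next_node = current
        prev = (data, prev)      # repoint this node backwards onto the reversed prefix
        current = next_node      # advance to the old next node
    return prev                  # prev is the new head

# reverse_sketch((1, (2, (3, None)))) -> (3, (2, (1, None)))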
6
'''simple docstring''' import math import random from typing import Any from .hill_climbing import SearchProblem def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any: """simple docstring""" lowerCamelCase_ =False lowerCamelCase_ =search_prob lowerCamelCase_ =start_temperate lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =None while not search_end: lowerCamelCase_ =current_state.score() if best_state is None or current_score > best_state.score(): lowerCamelCase_ =current_state scores.append(__snake_case ) iterations += 1 lowerCamelCase_ =None lowerCamelCase_ =current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor lowerCamelCase_ =neighbors.pop(__snake_case ) lowerCamelCase_ =picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: lowerCamelCase_ =change * -1 # in case we are finding minimum if change > 0: # improves the solution lowerCamelCase_ =picked_neighbor else: lowerCamelCase_ =(math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability lowerCamelCase_ =picked_neighbor lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor lowerCamelCase_ =True else: lowerCamelCase_ =next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(__snake_case ) , __snake_case ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) a_ : Optional[int] = simulated_annealing( prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) a_ : List[str] = simulated_annealing( prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return (3 * x**2) - (6 * y) a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True) print( """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F"""{local_min.score()}""" ) a_ : Dict = SearchProblem(x=3, y=4, step_size=1, 
function_to_optimize=test_fa) a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True) print( """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """ F"""{local_min.score()}""" )
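import math
import random

# A small sketch of the acceptance rule at the heart of the simulated annealing
# sample above: improving moves are always taken, while worsening moves are
# accepted with probability e**(change / temperature), so exploration fades as
# the temperature decays. The function name is illustrative.
def accept_move(change: float, temperature: float) -> bool:
    if change > 0:
        return True
    return random.random() < math.exp(change / temperature)

# A move that worsens the score by 1.0 is accepted ~37% of the time at
# temperature 1.0, but only ~0.005% of the time at temperature 0.1.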
6
1
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : Any = logging.get_logger(__name__) a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/""" a_ : Any = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def a_ ( __snake_case : int ) -> Any: """simple docstring""" if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowerCamelCase_ =key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ ={} import re lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case ) elif re_encoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case ) elif re_encoder_block_proj_out.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ 
=re_decoder_block_conv_out.sub(__snake_case , __snake_case ) elif re_decoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case ) elif re_decoder_block_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case ) elif re_prior_cond_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case ) elif re_prior_cond_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case ) # keep original key else: lowerCamelCase_ =original_key lowerCamelCase_ =replace_key(__snake_case ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) lowerCamelCase_ =original_key lowerCamelCase_ =original_key lowerCamelCase_ =value return new_dict @torch.no_grad() def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case ) open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content ) lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]] lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case ) lowerCamelCase_ =JukeboxModel(__snake_case ) lowerCamelCase_ =[] lowerCamelCase_ ={} for i, dict_name 
in enumerate(__snake_case ): lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model'''] lowerCamelCase_ ={} for k in old_dic.keys(): if k.endswith('''.b''' ): lowerCamelCase_ =old_dic[k] elif k.endswith('''.w''' ): lowerCamelCase_ =old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowerCamelCase_ =old_dic[k] else: lowerCamelCase_ =old_dic[k] lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}''' lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case ) weight_dict.append(__snake_case ) lowerCamelCase_ =weight_dict.pop(0 ) model.vqvae.load_state_dict(__snake_case ) for i in range(len(__snake_case ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile: json.dump(__snake_case , __snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) return weight_dict if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) a_ : Optional[int] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
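import re

# A minimal sketch of the regex-driven key renaming used throughout the
# conversion script above: match a checkpoint key, pull its group indices out,
# recompute the block index, and rebuild the new key. The pattern below mirrors
# the encoder conv case; the function name is illustrative.
_ENC_CONV = re.compile(r"encoders\.(\d+)\.level_blocks\.(\d+)\.model\.(\d+)\.(\d)\.(bias|weight)")

def rename_encoder_key_sketch(key: str) -> str:
    match = _ENC_CONV.fullmatch(key)
    if match is None:
        return key  # keep keys we do not recognize
    enc, level, outer, inner, param = match.groups()
    block_index = int(outer) * 2 + int(inner)
    return f"encoders.{enc}.level_blocks.{level}.downsample_block.{block_index}.{param}"

# rename_encoder_key_sketch("encoders.0.level_blocks.1.model.2.1.weight")
#   -> "encoders.0.level_blocks.1.downsample_block.5.weight"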
6
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( __snake_case : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def a_ ( __snake_case : List[Any] ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict: """simple docstring""" lowerCamelCase_ ={} for old_key in state_dict.keys(): lowerCamelCase_ =old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' ) else: lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCamelCase_ =state_dict[old_key] return new_dict def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =0 os.makedirs(__snake_case , exist_ok=__snake_case ) for expert in range(__snake_case ): lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(__snake_case ): lowerCamelCase_ =torch.load(__snake_case )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =os.path.join( __snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) torch.save(__snake_case , __snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__snake_case )[0]].dtype ) # Add the last block lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__snake_case ) == 1: 
lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) torch.save(__snake_case , __snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__snake_case , __snake_case ) # Otherwise, let's build the index lowerCamelCase_ ={} for idx, shard in enumerate(__snake_case ): lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' ) lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) for key in shard: lowerCamelCase_ =shard_file # Add the metadata lowerCamelCase_ ={'''total_size''': total_size} lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) return metadata, index if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a_ : Tuple = parser.parse_args() a_ , a_ : int = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_28, args.dtype, ) a_ : Tuple = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
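# A small sketch of the sharding index that the helper above writes out: every
# parameter name maps to the shard file holding it, plus a total_size metadata
# entry. The key names and the size value below are made up for illustration.
sharded_keys = [["encoder.w"], ["decoder.w", "decoder.embed_tokens.weight"]]
weights_name = "pytorch_model.bin"
weight_map = {}
for idx, shard in enumerate(sharded_keys):
    shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_keys):05d}.bin")
    for key in shard:
        weight_map[key] = shard_file
index = {"metadata": {"total_size": 123}, "weight_map": weight_map}
# weight_map["decoder.w"] == "pytorch_model-00002-of-00002.bin"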
6
1
'''simple docstring''' from pathlib import Path import numpy as np from PIL import Image def a_ ( __snake_case : np.ndarray ) -> np.ndarray: """simple docstring""" lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b def a_ ( __snake_case : np.ndarray ) -> np.ndarray: """simple docstring""" return (gray > 127) & (gray <= 255) def a_ ( __snake_case : np.ndarray , __snake_case : np.ndarray ) -> np.ndarray: """simple docstring""" lowerCamelCase_ =np.zeros_like(__snake_case ) lowerCamelCase_ =np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image lowerCamelCase_ =image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): lowerCamelCase_ =( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() lowerCamelCase_ =int(summation > 0 ) return output if __name__ == "__main__": # read original image a_ : int = Path(__file__).resolve().parent / """image_data""" / """lena.jpg""" a_ : Optional[int] = np.array(Image.open(lena_path)) # kernel to be applied a_ : Any = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) a_ : Union[str, Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image a_ : Optional[Any] = Image.fromarray(output).convert("""RGB""") pil_img.save("""result_dilation.png""")
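import numpy as np

# A tiny worked example of binary dilation with the cross-shaped kernel used
# above, written as a naive loop so the result can be checked by hand: a single
# foreground pixel grows into a plus shape.
image = np.zeros((5, 5), dtype=int)
image[2, 2] = 1
kernel = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
out = np.zeros_like(image)
for y in range(5):
    for x in range(5):
        for dy in range(-1, 2):
            for dx in range(-1, 2):
                if kernel[dy + 1, dx + 1] and 0 <= y + dy < 5 and 0 <= x + dx < 5:
                    out[y, x] |= image[y + dy, x + dx]
# out now has 1s at (2, 2) and its four direct neighbours, zeros elsewhere.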
6
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int =['image_processor', 'tokenizer'] lowercase : int ='LayoutLMv2ImageProcessor' lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ =features['''words'''] lowerCamelCase_ =self.tokenizer( text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, ) # add pixel values lowerCamelCase_ =features.pop('''pixel_values''' ) if return_overflowing_tokens is True: lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] ) 
lowerCamelCase_ =images return encoded_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' ) return images_with_overflow def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
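# A minimal illustration of the bookkeeping in get_overflowing_images above:
# each overflowing chunk keeps pointing at the image of the sample it came
# from. The image values here are placeholders.
images = ["image_a", "image_b"]
overflow_to_sample_mapping = [0, 0, 1]  # sample 0 was split into two chunks
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["image_a", "image_a", "image_b"]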
6
1
'''simple docstring''' from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo a_ : Optional[Any] = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ a_ : Union[str, Any] = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. """ a_ : str = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 
'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ), } ), ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 1, lowerCAmelCase = 4, ): """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase, hypotheses=lowerCAmelCase, min_len=lowerCAmelCase, max_len=lowerCAmelCase ) }
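from collections import Counter

# A compact, single-reference sketch of the GLEU idea described above: count
# all clipped 1..4-gram matches between hypothesis and reference, compute
# precision and recall over them, and return the minimum. The metric itself
# delegates to nltk's corpus_gleu; this re-implementation is only illustrative.
def ngram_counts(tokens, min_len=1, max_len=4):
    counts = Counter()
    for n in range(min_len, max_len + 1):
        for i in range(len(tokens) - n + 1):
            counts[tuple(tokens[i : i + n])] += 1
    return counts

def sentence_gleu_sketch(reference, hypothesis):
    ref, hyp = ngram_counts(reference), ngram_counts(hypothesis)
    matches = sum((ref & hyp).values())  # clipped n-gram matches
    precision = matches / max(sum(hyp.values()), 1)
    recall = matches / max(sum(ref.values()), 1)
    return min(precision, recall)

# sentence_gleu_sketch("the cat sat".split(), "the cat sat".split()) == 1.0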
6
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =VQModel lowercase : Union[str, Any] ='sample' @property def lowercase__ ( self, lowerCAmelCase=(32, 32) ): """simple docstring""" lowerCamelCase_ =4 lowerCamelCase_ =3 lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) return {"sample": image} @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={ '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 3, } lowerCamelCase_ =self.dummy_input return init_dict, inputs_dict def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ), 0 ) model.to(lowerCAmelCase ) lowerCamelCase_ =model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(lowerCAmelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size ) lowerCamelCase_ =image.to(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).sample lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
6
1
'''simple docstring''' import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=64, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=10, lowerCAmelCase=0.0_2, lowerCAmelCase=[1, 16, 4, 4], lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =image_size lowerCamelCase_ =patch_size lowerCamelCase_ =num_channels lowerCamelCase_ =is_training lowerCamelCase_ =use_labels lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =scope lowerCamelCase_ =backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size lowerCamelCase_ =(self.image_size // 32) ** 2 lowerCamelCase_ =num_patches + 1 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =self.get_config() return config, pixel_values, labels def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={ '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=lowerCAmelCase, ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ 
=ViTHybridModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.type_sequence_label_size lowerCamelCase_ =ViTHybridForImageClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs lowerCamelCase_ ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Optional[Any] =(ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () lowercase : List[Any] =( {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification} if is_torch_available() else {} ) lowercase : Dict =False lowercase : Any =False lowercase : List[str] =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ViTHybridModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, has_text_modality=lowerCAmelCase, hidden_size=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) lowerCamelCase_ =model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase, nn.Linear ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(lowerCAmelCase ) lowerCamelCase_ =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ =[*signature.parameters.keys()] lowerCamelCase_ =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =_config_zero_init(lowerCAmelCase ) for model_class in self.all_model_classes: lowerCamelCase_ =model_class(config=lowerCAmelCase ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": lowerCamelCase_ =[f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in 
model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'''Parameter {name} of model {model_class} seems not properly initialized''', ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =ViTHybridModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def a_ ( ) -> Any: """simple docstring""" lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self ): """simple docstring""" return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( lowerCAmelCase ) lowerCamelCase_ =self.default_image_processor lowerCamelCase_ =prepare_img() lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase_ =model(**lowerCAmelCase ) # verify the logits lowerCamelCase_ =torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase, atol=1e-4 ) ) @slow @require_accelerate def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) lowerCamelCase_ =ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''' ) lowerCamelCase_ =prepare_img() lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ) lowerCamelCase_ =model(**lowerCAmelCase ) lowerCamelCase_ =outputs.logits # model predicts one of the 1000 ImageNet classes lowerCamelCase_ =logits.argmax(-1 ).item() self.assertTrue(model.config.idalabel[predicted_class_idx], '''tabby, tabby cat''' )
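# A quick check of the sequence-length arithmetic used by the ViT hybrid tester
# above: the BiT backbone has an effective output stride of 32, so a 64x64
# input yields a (64 // 32)**2 = 4-patch feature map, plus one [CLS] token.
image_size = 64
num_patches = (image_size // 32) ** 2
seq_length = num_patches + 1
assert (num_patches, seq_length) == (4, 5)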
6
'''simple docstring''' import datasets from .evaluate import evaluate a_ : List[Any] = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ a_ : List[Any] = """ This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ a_ : Any = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': { '''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ), }, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={prediction['''id''']: 
prediction['''prediction_text'''] for prediction in predictions} lowerCamelCase_ =[ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase ) return score
6
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Optional[Any] = logging.get_logger(__name__) a_ : List[Any] = { """edbeeching/decision-transformer-gym-hopper-medium""": ( """https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json""" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Union[str, Any] ='decision_transformer' lowercase : Optional[int] =['past_key_values'] lowercase : List[str] ={ 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self, lowerCAmelCase=17, lowerCAmelCase=4, lowerCAmelCase=128, lowerCAmelCase=4_096, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=1_024, lowerCAmelCase=3, lowerCAmelCase=1, lowerCAmelCase=None, lowerCAmelCase="relu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=1e-5, lowerCAmelCase=0.0_2, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=50_256, lowerCAmelCase=50_256, lowerCAmelCase=False, lowerCAmelCase=False, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =state_dim lowerCamelCase_ =act_dim lowerCamelCase_ =hidden_size lowerCamelCase_ =max_ep_len lowerCamelCase_ =action_tanh lowerCamelCase_ =vocab_size lowerCamelCase_ =n_positions lowerCamelCase_ =n_layer lowerCamelCase_ =n_head lowerCamelCase_ =n_inner lowerCamelCase_ =activation_function lowerCamelCase_ =resid_pdrop lowerCamelCase_ =embd_pdrop lowerCamelCase_ =attn_pdrop lowerCamelCase_ =layer_norm_epsilon lowerCamelCase_ =initializer_range lowerCamelCase_ =scale_attn_weights lowerCamelCase_ =use_cache lowerCamelCase_ =scale_attn_by_inverse_layer_idx lowerCamelCase_ =reorder_and_upcast_attn lowerCamelCase_ =bos_token_id lowerCamelCase_ =eos_token_id super().__init__(bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
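A hedged usage sketch for the config class above; `DecisionTransformerModel` is the companion model class in `transformers`, and the hyperparameter values below are illustrative, matching the defaults in the signature.

from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096)
model = DecisionTransformerModel(config)  # randomly initialized weights
print(config.n_layer, config.n_head)  # 3 1, per the defaults above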
6
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a_ : Tuple = logging.get_logger(__name__) a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : Tuple = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : Union[str, Any] = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : int = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } a_ : List[Any] = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } a_ : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } a_ : Optional[int] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : Dict = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =VOCAB_FILES_NAMES lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : Dict =DPRContextEncoderTokenizer class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : List[Any] =DPRQuestionEncoderTokenizer a_ : Union[str, Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) a_ : Dict = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(lowerCamelCase__ ) class __UpperCamelCase : def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if titles is None and texts is None: return super().__call__( lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) elif titles is None or texts is None: lowerCamelCase_ =titles if texts is None else texts return super().__call__( lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles] lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'''There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.''' lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ ={ '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase ) ] } if return_attention_mask is not False: lowerCamelCase_ =[] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCamelCase_ =attention_mask return self.pad(lowerCAmelCase, 
padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ): """simple docstring""" lowerCamelCase_ =reader_input['''input_ids'''] lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ ) lowerCamelCase_ =[] for doc_id in sorted_docs: lowerCamelCase_ =list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase_ =sequence_ids.index(self.pad_token_id ) else: lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =[] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase ) lowerCamelCase_ =[] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' lowerCamelCase_ =end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : int =VOCAB_FILES_NAMES lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION lowercase : int =['input_ids', 'attention_mask'] lowercase : Dict =DPRReaderTokenizer
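The core scoring idea inside `_get_best_spans` above, reduced to a pure-Python sketch on toy logits (no transformers dependency; the values are made up):

start_logits = [0.1, 2.0, 0.3]
end_logits = [0.2, 0.1, 1.5]
max_answer_length = 3

scores = []
for start, start_score in enumerate(start_logits):
    for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
        scores.append(((start, start + length), start_score + end_score))
scores.sort(key=lambda item: item[1], reverse=True)
print(scores[0])  # ((1, 2), 3.5): the best (start, end) span and its additive score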
6
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ : List[str] = { """configuration_xlm_roberta""": [ """XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaConfig""", """XLMRobertaOnnxConfig""", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Union[str, Any] = ["""XLMRobertaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Tuple = ["""XLMRobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ """XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaForCausalLM""", """XLMRobertaForMaskedLM""", """XLMRobertaForMultipleChoice""", """XLMRobertaForQuestionAnswering""", """XLMRobertaForSequenceClassification""", """XLMRobertaForTokenClassification""", """XLMRobertaModel""", """XLMRobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ """TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMRobertaForCausalLM""", """TFXLMRobertaForMaskedLM""", """TFXLMRobertaForMultipleChoice""", """TFXLMRobertaForQuestionAnswering""", """TFXLMRobertaForSequenceClassification""", """TFXLMRobertaForTokenClassification""", """TFXLMRobertaModel""", """TFXLMRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = [ """FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxXLMRobertaForMaskedLM""", """FlaxXLMRobertaForCausalLM""", """FlaxXLMRobertaForMultipleChoice""", """FlaxXLMRobertaForQuestionAnswering""", """FlaxXLMRobertaForSequenceClassification""", """FlaxXLMRobertaForTokenClassification""", """FlaxXLMRobertaModel""", """FlaxXLMRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, 
TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys a_ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
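Net effect of the `_LazyModule` wiring above, sketched (assumes `transformers` with the torch backend installed): importing the package is cheap, and each heavy submodule is only imported on first attribute access.

import transformers.models.xlm_roberta as xlm_roberta  # no heavy imports yet

config = xlm_roberta.XLMRobertaConfig()      # lazily imports configuration_xlm_roberta
model = xlm_roberta.XLMRobertaModel(config)  # lazily imports modeling_xlm_roberta (torch)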
6
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase_ ={ '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } lowerCamelCase_ =Dataset.from_dict(__snake_case ) return dataset class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ), 2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase ) self.assertEqual(len(lowerCAmelCase ), 2 ) print(lowerCAmelCase ) self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
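`minhash_deduplication` above is a repo-local script, so as a self-contained illustration of the underlying idea, here is a MinHash Jaccard estimate using the third-party `datasketch` library; the 0.85 threshold matches the test.

from datasketch import MinHash

def minhash(text: str, num_perm: int = 128) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf-8"))
    return m

doc_a, doc_b = "a " * 20, "a " * 30  # the near-duplicate pair from get_dataset()
print(minhash(doc_a).jaccard(minhash(doc_b)) >= 0.85)  # True: both reduce to the shingle {"a"}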
6
1
'''simple docstring''' def a_ ( __snake_case : int = 1000 ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =1, 1 lowerCamelCase_ =2 while True: lowerCamelCase_ =0 lowerCamelCase_ =fa + fa lowerCamelCase_, lowerCamelCase_ =fa, f index += 1 for _ in str(f ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
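Quick sanity check for this Project Euler 25 solution (assuming the original, un-mangled variable names, with the digit-count loop inspecting the freshly computed Fibonacci value as fixed above): the first term with three digits is F(12) = 144.

assert solution(3) == 12  # 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144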
6
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a_ : Any = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
1
'''simple docstring''' import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def a_ ( __snake_case : str , __snake_case : Tuple , __snake_case : Tuple ) -> Union[str, Any]: """simple docstring""" # Initialise PyTorch model lowerCamelCase_ =TaConfig.from_json_file(__snake_case ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase_ =TaForConditionalGeneration(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_ta(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(__snake_case ) if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a_ : Tuple = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
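A hypothetical direct call to the converter above (all paths are placeholders; the argument order matches the `convert_tf_checkpoint_to_pytorch` call at the bottom of the script):

convert_tf_checkpoint_to_pytorch(
    "/path/to/t5/model.ckpt",   # tf_checkpoint_path
    "/path/to/t5/config.json",  # config_file
    "/path/to/output_dir",      # pytorch_dump_path
)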
6
'''simple docstring''' from collections import defaultdict from math import gcd def a_ ( __snake_case : int = 150_0000 ) -> int: """simple docstring""" lowerCamelCase_ =defaultdict(__snake_case ) lowerCamelCase_ =2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ): if gcd(__snake_case , __snake_case ) > 1: continue lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(__snake_case , limit + 1 , __snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"""{solution() = }""")
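A small worked check of the counting logic above (again assuming the original, un-mangled names): with limit=12 the only admissible Euclid generator is (m, n) = (2, 1), whose perimeter 2*m*(m + n) = 12 is the classic (3, 4, 5) right triangle, formed exactly one way.

# m, n = 2, 1  ->  sides m*m - n*n, 2*m*n, m*m + n*n  ==  3, 4, 5; perimeter 12
assert solution(12) == 1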
6
1
'''simple docstring''' from collections import defaultdict from math import gcd def a_ ( __snake_case : int = 150_0000 ) -> int: """simple docstring""" lowerCamelCase_ =defaultdict(__snake_case ) lowerCamelCase_ =2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ): if gcd(__snake_case , __snake_case ) > 1: continue lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(__snake_case , limit + 1 , __snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"""{solution() = }""")
6
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ : Tuple = 16 a_ : Optional[int] = 32 def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__snake_case : int ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase_ =datasets.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase_ =16 elif accelerator.mixed_precision != "no": lowerCamelCase_ =8 else: lowerCamelCase_ =None return tokenizer.pad( __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCamelCase_ =DataLoader( tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) lowerCamelCase_ =DataLoader( tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ : Tuple = mocked_dataloaders # noqa: F811 def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1": lowerCamelCase_ =2 # Initialize accelerator lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase_ =config['''lr'''] lowerCamelCase_ =int(config['''num_epochs'''] ) lowerCamelCase_ =int(config['''seed'''] ) lowerCamelCase_ =int(config['''batch_size'''] ) lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__snake_case ) def inner_training_loop(__snake_case : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase_ =model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case ) lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case ) # Instantiate scheduler lowerCamelCase_ =get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.loss accelerator.backward(__snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.logits.argmax(dim=-1 ) lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__snake_case , references=__snake_case , ) lowerCamelCase_ =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __snake_case ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowerCamelCase_ =parser.parse_args() lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
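The key new piece above is `@find_executable_batch_size`. Its contract, per `accelerate`: rerun the decorated function with a halved batch size whenever it raises an out-of-memory error. A minimal sketch:

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def training_loop(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build dataloaders/model with `batch_size`; if a CUDA OOM escapes,
    # the decorator frees memory and retries at 64, 32, 16, ...

training_loop()  # called with *no* arguments; the decorator injects batch_size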
6
1
'''simple docstring''' import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: a_ : Optional[Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=3, lowerCAmelCase=18, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =size if size is not None else {'''height''': 20, '''width''': 20} lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =image_size lowerCamelCase_ =min_resolution lowerCamelCase_ =max_resolution lowerCamelCase_ =size lowerCamelCase_ =do_normalize lowerCamelCase_ =do_convert_rgb lowerCamelCase_ =[512, 1_024, 2_048, 4_096] lowerCamelCase_ =patch_size if patch_size is not None else {'''height''': 16, '''width''': 16} def lowercase__ ( self ): """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg''' lowerCamelCase_ =Image.open(requests.get(lowerCAmelCase, stream=lowerCAmelCase ).raw ).convert('''RGB''' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' 
, ) @require_torch @require_vision class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : List[str] =PixaStructImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =PixaStructImageProcessingTester(self ) @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_convert_rgb''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processor_tester.prepare_dummy_image() lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) lowerCamelCase_ =2_048 lowerCamelCase_ =image_processor(lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_6_0_6 ), atol=1e-3, rtol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, Image.Image ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowerCamelCase_ =image_processor( lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, Image.Image ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 lowerCamelCase_ =True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowerCAmelCase ): lowerCamelCase_ =image_processor( image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches lowerCamelCase_ ='''Hello''' lowerCamelCase_ =image_processor( image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase, header_text=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowerCamelCase_ =image_processor( lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase, header_text=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def lowercase__ ( self ): """simple 
docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, np.ndarray ) lowerCamelCase_ =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowerCamelCase_ =image_processor( lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, torch.Tensor ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowerCamelCase_ =image_processor( lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' 
, ) @require_torch @require_vision class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =PixaStructImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =PixaStructImageProcessingTester(self, num_channels=4 ) lowerCamelCase_ =3 @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_convert_rgb''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, Image.Image ) # Test not batched input lowerCamelCase_ =( (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width''']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input lowerCamelCase_ =image_processor( image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (1, max_patch, expected_hidden_dim), ) # Test batched lowerCamelCase_ =image_processor( lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches self.assertEqual( encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
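A worked check of the `expected_hidden_dim` formula these tests keep recomputing: each flattened patch stores patch_height * patch_width * num_channels pixel values plus 2 slots for the patch's (row, col) indices.

patch_h, patch_w, num_channels = 16, 16, 3   # the tester defaults above
print(patch_h * patch_w * num_channels + 2)  # 770, the per-patch width in the RGB tests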
6
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py a_ : List[str] = """src/diffusers""" # Matches is_xxx_available() a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") a_ : Optional[Any] = """ {0} = None """ a_ : List[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ a_ : Optional[Any] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def a_ ( __snake_case : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =_re_backend.findall(__snake_case ) if len(__snake_case ) == 0: return None return "_and_".join(__snake_case ) def a_ ( ) -> Optional[int]: """simple docstring""" with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase_ =0 lowerCamelCase_ ={} # Go through the end of the file while line_index < len(__snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase_ =find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 lowerCamelCase_ =[] # Until we unindent, add backend objects to the list while line_index < len(__snake_case ) and len(lines[line_index] ) > 1: lowerCamelCase_ =lines[line_index] lowerCamelCase_ =_re_single_line_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__snake_case ) > 0: lowerCamelCase_ =objects else: line_index += 1 return backend_specific_objects def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(__snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(__snake_case , __snake_case ) else: return DUMMY_CLASS.format(__snake_case , __snake_case ) def a_ ( __snake_case : Tuple=None ) -> List[str]: """simple docstring""" if backend_specific_objects is None: lowerCamelCase_ =read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase_ ={} for backend, objects in backend_specific_objects.items(): lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']''' lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] ) lowerCamelCase_ =dummy_file return dummy_files def a_ ( __snake_case : Dict=False ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase_ ={'''torch''': '''pt'''} # Locate actual dummy modules and read their 
content. lowerCamelCase_ =os.path.join(__snake_case , '''utils''' ) lowerCamelCase_ ={ backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase_ ={} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.read() else: lowerCamelCase_ ='''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main ''' '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` ''' '''to fix this.''' ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a_ : Tuple = parser.parse_args() check_dummies(args.fix_and_overwrite)
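For illustration, what `create_dummy_object` renders for a class-style object, following the `DUMMY_CLASS` template above (the class name is a made-up example):

# create_dummy_object("UNet2DModel", '["torch"]') yields roughly:
class UNet2DModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])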
6
1
'''simple docstring''' import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int =(CMStochasticIterativeScheduler,) lowercase : Dict =10 def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={ '''num_train_timesteps''': 201, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } config.update(**lowerCAmelCase ) return config def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =10 lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =self.scheduler_classes[0](**lowerCAmelCase ) scheduler.set_timesteps(lowerCAmelCase ) lowerCamelCase_ =scheduler.timesteps[0] lowerCamelCase_ =scheduler.timesteps[1] lowerCamelCase_ =self.dummy_sample lowerCamelCase_ =0.1 * sample lowerCamelCase_ =scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ).prev_sample lowerCamelCase_ =scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape, sample.shape ) self.assertEqual(output_a.shape, output_a.shape ) def lowercase__ ( self ): """simple docstring""" for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =1 scheduler.set_timesteps(lowerCAmelCase ) lowerCamelCase_ =scheduler.timesteps lowerCamelCase_ =torch.manual_seed(0 ) lowerCamelCase_ =self.dummy_model() lowerCamelCase_ =self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(lowerCAmelCase ): # 1. scale model input lowerCamelCase_ =scheduler.scale_model_input(lowerCAmelCase, lowerCAmelCase ) # 2. predict noise residual lowerCamelCase_ =model(lowerCAmelCase, lowerCAmelCase ) # 3. predict previous sample x_t-1 lowerCamelCase_ =scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, generator=lowerCAmelCase ).prev_sample lowerCamelCase_ =pred_prev_sample lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) ) lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) ) assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2 assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[106, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase ) lowerCamelCase_ =scheduler.timesteps lowerCamelCase_ =torch.manual_seed(0 ) lowerCamelCase_ =self.dummy_model() lowerCamelCase_ =self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowerCamelCase_ =scheduler.scale_model_input(lowerCAmelCase, lowerCAmelCase ) # 2. predict noise residual lowerCamelCase_ =model(lowerCAmelCase, lowerCAmelCase ) # 3. 
predict previous sample x_t-1 lowerCamelCase_ =scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, generator=lowerCAmelCase ).prev_sample lowerCamelCase_ =pred_prev_sample lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) ) lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) ) assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2 assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[39, 30, 12, 15, 0] with self.assertRaises(lowerCAmelCase, msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[39, 30, 12, 1, 0] lowerCamelCase_ =len(lowerCAmelCase ) with self.assertRaises(lowerCAmelCase, msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase, timesteps=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase, msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''', ): scheduler.set_timesteps(timesteps=lowerCAmelCase )
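Outside the test harness, the sampling loop exercised above boils down to the following sketch (the random tensor stands in for a real consistency model; shapes are illustrative):

import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    model_output = torch.randn_like(model_input)  # stand-in for the denoising model
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample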
6
'''simple docstring''' a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def a_ ( __snake_case : int ) -> int: """simple docstring""" lowerCamelCase_ =0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution a_ : list[bool | None] = [None] * 10_00_00_00 a_ : List[Any] = True a_ : Optional[Any] = False def a_ ( __snake_case : int ) -> bool: """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase_ =chain(next_number(__snake_case ) ) lowerCamelCase_ =number_chain while number < 1000_0000: lowerCamelCase_ =number_chain number *= 10 return number_chain def a_ ( __snake_case : int = 1000_0000 ) -> int: """simple docstring""" for i in range(1 , __snake_case ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution() = }""")
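Sanity check for the chain logic above, assuming the seeds described in the comments (CHAINS[57] = True for the 89-chain and CHAINS[0] = False for the 1-chain, which the mangled bare `a_ = True` / `a_ = False` assignments stand in for): of the numbers 1..10, exactly seven (2, 3, 4, 5, 6, 8, 9) reach 89.

assert solution(10) == 7  # 1, 7 and 10 all end in the 1-chain instead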
6
1
'''simple docstring'''

import datasets

from .evaluate import evaluate


a_ : List[Any] = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for
when reviewing contracts in connection with corporate transactions.
"""

a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def lowercase__ ( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': {
                        '''id''': datasets.Value('''string''' ),
                        '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
                    },
                    '''references''': {
                        '''id''': datasets.Value('''string''' ),
                        '''answers''': datasets.features.Sequence(
                            {
                                '''text''': datasets.Value('''string''' ),
                                '''answer_start''': datasets.Value('''int32''' ),
                            } ),
                    },
                } ),
            codebase_urls=['''https://www.atticusprojectai.org/cuad'''],
            reference_urls=['''https://www.atticusprojectai.org/cuad'''],
        )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        lowerCamelCase_ =[
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
        return score
6
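The compute method in the record above reshapes flat prediction and reference lists into the nested SQuAD-style structure its evaluate() helper expects. A standalone illustration of that reshaping, with toy values (all data below is made up):

predictions = [{"id": "q0", "prediction_text": ["The seller:"]}]
references = [{"id": "q0", "answers": {"text": ["The seller:"], "answer_start": [0]}}]

# id -> list of candidate answer texts
pred_dict = {p["id"]: p["prediction_text"] for p in predictions}

# references wrapped into {"paragraphs": [{"qas": [...]}]}
dataset = [
    {
        "paragraphs": [
            {
                "qas": [
                    {
                        "answers": [{"text": t} for t in ref["answers"]["text"]],
                        "id": ref["id"],
                    }
                    for ref in references
                ]
            }
        ]
    }
]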
'''simple docstring'''

from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def a_ ( __snake_case : Tuple ) -> str:
    """simple docstring"""
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )


class __UpperCamelCase ( lowerCamelCase__ ):
    @staticmethod
    def lowercase__ ( lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''', action='''store_true''', help='''Force the model to be downloaded even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''',
            action='''store_true''',
            help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''',
        )
        download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
        download_parser.set_defaults(func=lowerCAmelCase )

    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =model
        lowerCamelCase_ =cache
        lowerCamelCase_ =force
        lowerCamelCase_ =trust_remote_code

    def lowercase__ ( self ):
        """simple docstring"""
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
6
1
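The command above is a thin wrapper around the public from_pretrained API; assuming a standard transformers install, the equivalent direct call is roughly the following (the model id is only an example):

from transformers import AutoModel, AutoTokenizer

# Download (or reuse from cache) a model and its tokenizer.
model = AutoModel.from_pretrained("bert-base-uncased", cache_dir=None, force_download=False)
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", cache_dir=None, force_download=False)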
'''simple docstring'''

from typing import Dict

from .base import GenericTensor, Pipeline


class __UpperCamelCase ( lowerCamelCase__ ):
    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        if tokenize_kwargs is None:
            lowerCamelCase_ ={}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
            lowerCamelCase_ =truncation

        lowerCamelCase_ =tokenize_kwargs

        lowerCamelCase_ ={}
        if return_tensors is not None:
            lowerCamelCase_ =return_tensors

        return preprocess_params, {}, postprocess_params

    def lowercase__ ( self, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =self.framework
        lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        return model_inputs

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =self.model(**lowerCAmelCase )
        return model_outputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        return super().__call__(*lowerCAmelCase, **lowerCAmelCase )
6
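A hedged usage sketch for the pipeline above, via the standard transformers factory (the model id is only an example):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great.")
# `features` is a nested list shaped [batch, tokens, hidden_size].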
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features a_ : List[str] = logging.get_logger(__name__) a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} ) lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowercase : int =field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowercase : int =field( default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) lowercase : int =field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) lowercase : int =field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ) } , ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowercase : float =field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='train' lowercase : Any ='dev' class __UpperCamelCase ( lowerCamelCase__ ): lowercase : SquadDataTrainingArguments lowercase : List[SquadFeatures] lowercase : Split lowercase : bool def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ): """simple docstring""" lowerCamelCase_ =args lowerCamelCase_ =is_language_sensitive lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowerCAmelCase, lowerCAmelCase ): try: lowerCamelCase_ =Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) lowerCamelCase_ =mode # Load data features from cache or dataset file lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1''' lowerCamelCase_ =os.path.join( 
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase_ =cached_features_file + '''.lock''' with FileLock(lowerCAmelCase ): if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache: lowerCamelCase_ =time.time() lowerCamelCase_ =torch.load(lowerCAmelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase_ =self.old_features['''features'''] lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase ) lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ''' future run''' ) else: if mode == Split.dev: lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase_ =self.processor.get_train_examples(args.data_dir ) lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features( examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, ) lowerCamelCase_ =time.time() torch.save( {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.features[i] lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float ) lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': attention_mask, '''token_type_ids''': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} ) if self.args.version_2_with_negative: inputs.update({'''is_impossible''': is_impossible} ) if self.is_language_sensitive: inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} ) return inputs
6
1
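The dataset record above uses a load-or-build cache guarded by a file lock so that only one distributed worker does the preprocessing. The core pattern, reduced to a sketch (the path handling and the `build_features` callable are illustrative stand-ins):

import os

import torch
from filelock import FileLock


def load_or_build(cached_file: str, build_features):
    # Serialize access so concurrent processes do not rebuild the same cache.
    with FileLock(cached_file + ".lock"):
        if os.path.exists(cached_file):
            return torch.load(cached_file)
        features = build_features()
        torch.save(features, cached_file)
        return features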
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : Any = {"""vocab_file""": """vocab.txt"""} a_ : str = { """vocab_file""": { """facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""", """facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""", }, } a_ : str = { """facebook/esm2_t6_8M_UR50D""": 10_24, """facebook/esm2_t12_35M_UR50D""": 10_24, } def a_ ( __snake_case : str ) -> Any: """simple docstring""" with open(__snake_case , '''r''' ) as f: lowerCamelCase_ =f.read().splitlines() return [l.strip() for l in lines] class __UpperCamelCase ( lowerCamelCase__ ): lowercase : str =VOCAB_FILES_NAMES lowercase : List[str] =PRETRAINED_VOCAB_FILES_MAP lowercase : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[Any] =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase="<unk>", lowerCAmelCase="<cls>", lowerCAmelCase="<pad>", lowerCAmelCase="<mask>", lowerCAmelCase="<eos>", **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =load_vocab_file(lowerCAmelCase ) lowerCamelCase_ =dict(enumerate(self.all_tokens ) ) lowerCamelCase_ ={tok: ind for ind, tok in enumerate(self.all_tokens )} lowerCamelCase_ =unk_token lowerCamelCase_ =cls_token lowerCamelCase_ =pad_token lowerCamelCase_ =mask_token lowerCamelCase_ =eos_token lowerCamelCase_ =self.all_tokens self._create_trie(self.unique_no_split_tokens ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self._id_to_token.get(lowerCAmelCase, self.unk_token ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self._token_to_id.get(lowerCAmelCase, self._token_to_id.get(self.unk_token ) ) def lowercase__ ( self, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return text.split() def lowercase__ ( self, lowerCAmelCase=False ): """simple docstring""" return len(self._id_to_token ) def lowercase__ ( self ): """simple docstring""" return {token: i for i, token in enumerate(self.all_tokens )} def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self._token_to_id.get(lowerCAmelCase, self._token_to_id.get(self.unk_token ) ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self._id_to_token.get(lowerCAmelCase, self.unk_token ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =[self.cls_token_id] lowerCamelCase_ =[self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] lowerCamelCase_ =[1] + ([0] * 
len(lowerCAmelCase )) + [1] if token_ids_a is not None: mask += [0] * len(lowerCAmelCase ) + [1] return mask def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =os.path.join(lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(lowerCAmelCase, '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def lowercase__ ( self ): """simple docstring""" return self.get_vocab_size(with_added_tokens=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = False ): """simple docstring""" return super()._add_tokens(lowerCAmelCase, special_tokens=lowerCAmelCase )
6
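The tokenizer record above frames sequences with a CLS and an EOS token rather than a separator. A sketch of what its build_inputs_with_special_tokens amounts to (assumed behaviour; the token ids are illustrative):

def add_special_tokens(ids_a, ids_b=None, cls_id=0, eos_id=2):
    if ids_b is None:
        return [cls_id] + ids_a + [eos_id]
    # A second segment always gets its own trailing EOS.
    return [cls_id] + ids_a + [eos_id] + ids_b + [eos_id]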
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : Any = logging.get_logger(__name__) a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/""" a_ : Any = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def a_ ( __snake_case : int ) -> Any: """simple docstring""" if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowerCamelCase_ =key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ ={} import re lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case ) elif re_encoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case ) elif re_encoder_block_proj_out.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ 
=re_decoder_block_conv_out.sub(__snake_case , __snake_case ) elif re_decoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case ) elif re_decoder_block_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case ) elif re_prior_cond_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case ) elif re_prior_cond_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case ) # keep original key else: lowerCamelCase_ =original_key lowerCamelCase_ =replace_key(__snake_case ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) lowerCamelCase_ =original_key lowerCamelCase_ =original_key lowerCamelCase_ =value return new_dict @torch.no_grad() def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case ) open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content ) lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]] lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case ) lowerCamelCase_ =JukeboxModel(__snake_case ) lowerCamelCase_ =[] lowerCamelCase_ ={} for i, dict_name 
in enumerate(__snake_case ): lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model'''] lowerCamelCase_ ={} for k in old_dic.keys(): if k.endswith('''.b''' ): lowerCamelCase_ =old_dic[k] elif k.endswith('''.w''' ): lowerCamelCase_ =old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowerCamelCase_ =old_dic[k] else: lowerCamelCase_ =old_dic[k] lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}''' lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case ) weight_dict.append(__snake_case ) lowerCamelCase_ =weight_dict.pop(0 ) model.vqvae.load_state_dict(__snake_case ) for i in range(len(__snake_case ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile: json.dump(__snake_case , __snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) return weight_dict if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) a_ : Optional[int] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
6
1
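The conversion script above renames checkpoint keys by matching compiled regexes and rebuilding each key from the captured groups. The same idea on a toy state dict (the pattern and the replacement scheme below are illustrative, not the real Jukebox ones):

import re

pattern = re.compile(r"encoders\.(\d+)\.model\.(\d+)\.(bias|weight)")
old_state = {"encoders.0.model.3.weight": "tensor..."}

new_state = {}
for old_key, value in old_state.items():
    match = pattern.fullmatch(old_key)
    if match:
        enc, layer, kind = match.groups()
        # Rebuild the key in the target model's naming scheme.
        new_key = f"encoders.{enc}.blocks.{layer}.{kind}"
    else:
        new_key = old_key  # keep keys that match no rule
    new_state[new_key] = value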
'''simple docstring'''

a_ : int = 2_56
# Modulus to hash a string
a_ : Union[str, Any] = 1_00_00_03


def a_ ( __snake_case : str , __snake_case : str ) -> bool:
    """simple docstring"""
    lowerCamelCase_ =len(__snake_case )
    lowerCamelCase_ =len(__snake_case )
    if p_len > t_len:
        return False

    lowerCamelCase_ =0
    lowerCamelCase_ =0
    lowerCamelCase_ =1

    # Calculating the hash of pattern and substring of text
    for i in range(__snake_case ):
        lowerCamelCase_ =(ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        lowerCamelCase_ =(ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        lowerCamelCase_ =(modulus_power * alphabet_size) % modulus

    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        lowerCamelCase_ =(
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False


def a_ ( ) -> None:
    """simple docstring"""
    lowerCamelCase_ ='''abc1abc12'''
    lowerCamelCase_ ='''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    lowerCamelCase_ ='''alskfjaldsk23adsfabcabc'''
    assert rabin_karp(__snake_case , __snake_case ) and not rabin_karp(__snake_case , __snake_case )

    # Test 2)
    lowerCamelCase_ ='''ABABX'''
    lowerCamelCase_ ='''ABABZABABYABABX'''
    assert rabin_karp(__snake_case , __snake_case )

    # Test 3)
    lowerCamelCase_ ='''AAAB'''
    lowerCamelCase_ ='''ABAAAAAB'''
    assert rabin_karp(__snake_case , __snake_case )

    # Test 4)
    lowerCamelCase_ ='''abcdabcy'''
    lowerCamelCase_ ='''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(__snake_case , __snake_case )

    # Test 5)
    lowerCamelCase_ ='''Lü'''
    lowerCamelCase_ ='''Lüsai'''
    assert rabin_karp(__snake_case , __snake_case )
    lowerCamelCase_ ='''Lue'''
    assert not rabin_karp(__snake_case , __snake_case )
    print('''Success.''' )


if __name__ == "__main__":
    test_rabin_karp()
6
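Because the masking in the record above reuses one variable name for several distinct values, the rolling hash is hard to follow as written. A de-obfuscated sketch of the same search; all names are illustrative:

def rabin_karp(pattern: str, text: str, base: int = 256, mod: int = 1_000_003) -> bool:
    m, n = len(pattern), len(text)
    if m > n:
        return False
    p_hash = t_hash = 0
    high = 1  # becomes base**(m-1) % mod, built incrementally
    for i in range(m):
        p_hash = (ord(pattern[i]) + p_hash * base) % mod
        t_hash = (ord(text[i]) + t_hash * base) % mod
        if i != m - 1:
            high = (high * base) % mod
    for i in range(n - m + 1):
        # Compare hashes first, then verify to rule out collisions.
        if t_hash == p_hash and text[i : i + m] == pattern:
            return True
        if i != n - m:
            # Slide the window: drop text[i], append text[i + m].
            t_hash = ((t_hash - ord(text[i]) * high) * base + ord(text[i + m])) % mod
    return False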
'''simple docstring'''

def a_ ( __snake_case : int = 1000 ) -> int:
    """simple docstring"""
    lowerCamelCase_, lowerCamelCase_ =1, 1
    lowerCamelCase_ =2
    while True:
        lowerCamelCase_ =0
        lowerCamelCase_ =fa + fa
        lowerCamelCase_, lowerCamelCase_ =fa, f
        index += 1
        for _ in str(__snake_case ):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
6
1
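The masking above collapses several distinct variables into one name, so the record is hard to read as written. A clean sketch of the assumed intent (Project Euler 25: the index of the first Fibonacci number with a given digit count); names are illustrative:

def first_fib_with_digits(digits: int = 1000) -> int:
    a, b = 1, 1  # F(1), F(2)
    index = 2
    while len(str(b)) < digits:
        a, b = b, a + b
        index += 1
    return index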
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : int =KandinskyVaaPriorPipeline lowercase : Tuple =['prompt'] lowercase : int =['prompt', 'negative_prompt'] lowercase : Optional[Any] =[ 'num_images_per_prompt', 'generator', 'num_inference_steps', 'latents', 'negative_prompt', 'guidance_scale', 'output_type', 'return_dict', ] lowercase : Optional[Any] =False @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self ): """simple docstring""" return 100 @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, ) return CLIPTextModelWithProjection(lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''num_attention_heads''': 2, '''attention_head_dim''': 12, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } lowerCamelCase_ =PriorTransformer(**lowerCAmelCase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 lowerCamelCase_ =nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) lowerCamelCase_ =CLIPVisionModelWithProjection(lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =CLIPImageProcessor( crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, ) return image_processor def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.dummy_prior lowerCamelCase_ =self.dummy_image_encoder lowerCamelCase_ =self.dummy_text_encoder lowerCamelCase_ =self.dummy_tokenizer lowerCamelCase_ =self.dummy_image_processor lowerCamelCase_ =UnCLIPScheduler( variance_type='''fixed_small_log''', prediction_type='''sample''', 
num_train_timesteps=1_000, clip_sample=lowerCAmelCase, clip_sample_range=1_0.0, ) lowerCamelCase_ ={ '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) lowerCamelCase_ =output.image_embeds lowerCamelCase_ =pipe( **self.get_dummy_inputs(lowerCAmelCase ), return_dict=lowerCAmelCase, )[0] lowerCamelCase_ =image[0, -10:] lowerCamelCase_ =image_from_tuple[0, -10:] assert image.shape == (1, 32) lowerCamelCase_ =np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =torch_device == '''cpu''' lowerCamelCase_ =True lowerCamelCase_ =False self._test_inference_batch_single_identical( test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, test_mean_pixel_difference=lowerCAmelCase, ) @skip_mps def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =torch_device == '''cpu''' lowerCamelCase_ =False self._test_attention_slicing_forward_pass( test_max_difference=lowerCAmelCase, test_mean_pixel_difference=lowerCAmelCase, )
6
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(""".""") def a_ ( __snake_case : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ =test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F'''{test_file} instead.''' ) lowerCamelCase_ =components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )] lowerCamelCase_ ='''.'''.join(__snake_case ) return test_module_path def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =get_module_path(__snake_case ) lowerCamelCase_ =importlib.import_module(__snake_case ) return test_module def a_ ( __snake_case : Dict ) -> Tuple: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(__snake_case , __snake_case ) ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): lowerCamelCase_ =getattr(__snake_case , __snake_case ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] ) if len(__snake_case ) > 0: test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =test_class() if hasattr(__snake_case , '''setUp''' ): test.setUp() lowerCamelCase_ =None if hasattr(__snake_case , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: lowerCamelCase_ =test.model_tester.__class__ return model_tester def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =[] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case ) lowerCamelCase_ =[] for test_class in test_classes: lowerCamelCase_ =get_model_tester_from_test_class(__snake_case ) if tester_class is not None: tester_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Tuple ) -> Tuple: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes} return test_tester_mapping def a_ ( __snake_case : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_test_mapping def a_ ( __snake_case : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_to_tester_mapping def a_ ( __snake_case : List[str] ) -> List[Any]: """simple docstring""" if isinstance(__snake_case , __snake_case ): return o elif isinstance(__snake_case , __snake_case ): return o.__name__ elif isinstance(__snake_case , (list, tuple) ): return [to_json(__snake_case ) for x in o] elif isinstance(__snake_case , __snake_case ): return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()} else: return o
6
1
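A hedged usage sketch for the helpers in the record above. The internal call sites suggest the unmasked names (get_test_classes, get_model_to_tester_mapping), and the test path is only an example:

test_file = "tests/models/bert/test_modeling_bert.py"

test_classes = get_test_classes(test_file)                 # e.g. [BertModelTest, ...]
model_to_testers = get_model_to_tester_mapping(test_file)  # {model class: [tester classes]}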
'''simple docstring''' import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 a_ : List[Any] = { """return_dict""": False, """output_hidden_states""": True, """output_attentions""": True, """torchscript""": True, """torch_dtype""": """float16""", """use_bfloat16""": True, """tf_legacy_loss""": True, """pruned_heads""": {"""a""": 1}, """tie_word_embeddings""": False, """is_decoder""": True, """cross_attention_hidden_size""": 1_28, """add_cross_attention""": True, """tie_encoder_decoder""": True, """max_length""": 50, """min_length""": 3, """do_sample""": True, """early_stopping""": True, """num_beams""": 3, """num_beam_groups""": 3, """diversity_penalty""": 0.5, """temperature""": 2.0, """top_k""": 10, """top_p""": 0.7, """typical_p""": 0.2, """repetition_penalty""": 0.8, """length_penalty""": 0.8, """no_repeat_ngram_size""": 5, """encoder_no_repeat_ngram_size""": 5, """bad_words_ids""": [1, 2, 3], """num_return_sequences""": 3, """chunk_size_feed_forward""": 5, """output_scores""": True, """return_dict_in_generate""": True, """forced_bos_token_id""": 2, """forced_eos_token_id""": 3, """remove_invalid_values""": True, """architectures""": ["""BertModel"""], """finetuning_task""": """translation""", """id2label""": {0: """label"""}, """label2id""": {"""label""": """0"""}, """tokenizer_class""": """BertTokenizerFast""", """prefix""": """prefix""", """bos_token_id""": 6, """pad_token_id""": 7, """eos_token_id""": 8, """sep_token_id""": 9, """decoder_start_token_id""": 10, """exponential_decay_length_penalty""": (5, 1.01), """suppress_tokens""": [0, 1], """begin_suppress_tokens""": 2, """task_specific_params""": {"""translation""": """some_params"""}, """problem_type""": """regression""", } @is_staging_test class __UpperCamelCase ( unittest.TestCase ): @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =TOKEN HfFolder.save_token(lowerCAmelCase ) @classmethod def lowercase__ ( cls ): """simple docstring""" try: delete_repo(token=cls._token, repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='''test-dynamic-config''' ) except HTTPError: pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub('''test-config''', use_auth_token=self._token ) lowerCamelCase_ =BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) ) # Reset repo delete_repo(token=self._token, repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase, repo_id='''test-config''', push_to_hub=lowerCAmelCase, use_auth_token=self._token ) lowerCamelCase_ =BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v 
in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''', use_auth_token=self._token ) lowerCamelCase_ =BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) ) # Reset repo delete_repo(token=self._token, repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCAmelCase, repo_id='''valid_org/test-config-org''', push_to_hub=lowerCAmelCase, use_auth_token=self._token ) lowerCamelCase_ =BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) ) def lowercase__ ( self ): """simple docstring""" CustomConfig.register_for_auto_class() lowerCamelCase_ =CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''', use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map, {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) lowerCamelCase_ =AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCAmelCase ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__, '''CustomConfig''' ) self.assertEqual(new_config.attribute, 42 ) class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated lowerCamelCase_ =c.n_embd + 1 # int lowerCamelCase_ =c.resid_pdrop + 1.0 # float lowerCamelCase_ =not c.scale_attn_weights # bool lowerCamelCase_ =c.summary_type + '''foo''' # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCAmelCase, c.n_embd, '''mismatch for key: n_embd''' ) self.assertEqual(lowerCAmelCase, c.resid_pdrop, '''mismatch for key: resid_pdrop''' ) self.assertEqual(lowerCAmelCase, c.scale_attn_weights, '''mismatch for key: scale_attn_weights''' ) self.assertEqual(lowerCAmelCase, c.summary_type, '''mismatch for key: summary_type''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =PretrainedConfig() lowerCamelCase_ =[key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCAmelCase, ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) lowerCamelCase_ =[key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase, lowerCAmelCase )] if len(lowerCAmelCase ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' f''' {', '.join(lowerCAmelCase )}.''' ) def lowercase__ ( self ): """simple docstring""" with self.assertRaises(lowerCAmelCase ): # config is in subfolder, the following should not work without specifying the subfolder lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''', subfolder='''bert''' ) self.assertIsNotNone(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =mock.Mock() lowerCamelCase_ =500 lowerCamelCase_ ={} lowerCamelCase_ =HTTPError lowerCamelCase_ ={} # Download this model to make sure it's in the cache. lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''', return_value=lowerCAmelCase ) as mock_head: lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =AutoConfig.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ =['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCAmelCase ) lowerCamelCase_ =2 json.dump(configuration.to_dict(), open(os.path.join(lowerCAmelCase, '''config.4.0.0.json''' ), '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertEqual(new_configuration.hidden_size, 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 lowerCamelCase_ =['''config.42.0.0.json'''] lowerCamelCase_ =768 configuration.save_pretrained(lowerCAmelCase ) shutil.move(os.path.join(lowerCAmelCase, '''config.4.0.0.json''' ), os.path.join(lowerCAmelCase, '''config.42.0.0.json''' ) ) lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) self.assertEqual(new_configuration.hidden_size, 768 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''hf-internal-testing/test-two-configs''' import transformers as new_transformers lowerCamelCase_ ='''v4.0.0''' lowerCamelCase_, lowerCamelCase_ =new_transformers.models.auto.AutoConfig.from_pretrained( lowerCAmelCase, return_unused_kwargs=lowerCAmelCase ) self.assertEqual(new_configuration.hidden_size, 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCAmelCase, {} ) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers lowerCamelCase_ ='''v3.0.0''' lowerCamelCase_ =old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase ) self.assertEqual(old_configuration.hidden_size, 768 )
6
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
    lowercase : str =['speech']

    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        requires_backends(self, ['''speech'''] )


class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
    lowercase : Any =['speech']

    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        requires_backends(self, ['''speech'''] )
6
1
'''simple docstring'''

import sys

a_ : Optional[int] = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def a_ ( __snake_case : str ) -> int:
    """simple docstring"""
    lowerCamelCase_ =1
    for digit in s:
        product *= int(__snake_case )
    return product


def a_ ( __snake_case : str = N ) -> int:
    """simple docstring"""
    lowerCamelCase_ =-sys.maxsize - 1
    lowerCamelCase_ =n[:13]
    lowerCamelCase_ =13
    while cur_index < len(__snake_case ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            lowerCamelCase_ =substr[1:] + n[cur_index]
            cur_index += 1
        else:
            lowerCamelCase_ =max(__snake_case , str_eval(__snake_case ) )
            lowerCamelCase_ =n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(F"""{solution() = }""")
6
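A straightforward sliding-window version of the same search (assumed to be Project Euler 8); slower than the skip-ahead logic in the record above, but easy to verify against it:

def largest_product(digits: str, window: int = 13) -> int:
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for ch in digits[i : i + window]:
            product *= int(ch)
        best = max(best, product)
    return best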
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] =['image_processor', 'tokenizer'] lowercase : Optional[int] ='AutoImageProcessor' lowercase : List[str] ='AutoTokenizer' def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =self.image_processor lowerCamelCase_ =False def __call__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase ) if len(lowerCAmelCase ) > 0: lowerCamelCase_ =args[0] lowerCamelCase_ =args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase ) if text is not None: lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: lowerCamelCase_ =encodings['''input_ids'''] return inputs def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @contextmanager def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) lowerCamelCase_ =True lowerCamelCase_ =self.tokenizer yield lowerCamelCase_ =self.image_processor lowerCamelCase_ =False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ): """simple docstring""" if added_vocab is None: lowerCamelCase_ =self.tokenizer.get_added_vocab() lowerCamelCase_ ={} while tokens: lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE ) if start_token is None: break lowerCamelCase_ =start_token.group(1 ) lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE ) lowerCamelCase_ =start_token.group() if end_token is None: lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' ) else: lowerCamelCase_ =end_token.group() lowerCamelCase_ =re.escape(lowerCAmelCase ) lowerCamelCase_ =re.escape(lowerCAmelCase ) lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE ) if content is not None: lowerCamelCase_ =content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase ) if value: if len(lowerCAmelCase ) == 1: lowerCamelCase_ =value[0] lowerCamelCase_ =value else: # leaf nodes lowerCamelCase_ =[] for leaf in content.split(R'''<sep/>''' ): lowerCamelCase_ =leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": lowerCamelCase_ =leaf[1:-2] # for categorical special tokens output[key].append(lowerCAmelCase ) if len(output[key] ) == 1: lowerCamelCase_ =output[key][0] lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase ) if len(lowerCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
6
1
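The JSON-from-tokens method in the record above peels <s_key>...</s_key> spans with regexes and recurses on the inner content. A toy illustration of that unwrapping (the tag names are illustrative):

import re

tokens = "<s_menu><s_name>latte</s_name></s_menu>"

key = re.search(r"<s_(.*?)>", tokens).group(1)                 # "menu"
inner = re.search(rf"<s_{key}>(.*?)</s_{key}>", tokens).group(1)
# inner == "<s_name>latte</s_name>"; recursing on it yields {"menu": {"name": "latte"}}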
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ : Any = logging.get_logger(__name__) a_ : Optional[Any] = { """SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""", # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : str ='deformable_detr' lowercase : Union[str, Any] ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=3, lowerCAmelCase=300, lowerCAmelCase=1_024, lowerCAmelCase=6, lowerCAmelCase=1_024, lowerCAmelCase=8, lowerCAmelCase=6, lowerCAmelCase=1_024, lowerCAmelCase=8, lowerCAmelCase=0.0, lowerCAmelCase=True, lowerCAmelCase="relu", lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase="sine", lowerCAmelCase="resnet50", lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=4, lowerCAmelCase=4, lowerCAmelCase=4, lowerCAmelCase=False, lowerCAmelCase=300, lowerCAmelCase=False, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=1, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=0.1, lowerCAmelCase=0.2_5, lowerCAmelCase=False, **lowerCAmelCase, ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCamelCase_ =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =backbone_config.get('''model_type''' ) lowerCamelCase_ =CONFIG_MAPPING[backbone_model_type] lowerCamelCase_ =config_class.from_dict(lowerCAmelCase ) lowerCamelCase_ =use_timm_backbone lowerCamelCase_ =backbone_config lowerCamelCase_ =num_channels lowerCamelCase_ =num_queries lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =d_model lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =decoder_layers lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =init_xavier_std lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =auxiliary_loss lowerCamelCase_ =position_embedding_type lowerCamelCase_ =backbone lowerCamelCase_ =use_pretrained_backbone lowerCamelCase_ =dilation # deformable attributes lowerCamelCase_ =num_feature_levels lowerCamelCase_ =encoder_n_points lowerCamelCase_ =decoder_n_points lowerCamelCase_ =two_stage lowerCamelCase_ =two_stage_num_proposals lowerCamelCase_ =with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowerCamelCase_ =class_cost lowerCamelCase_ =bbox_cost lowerCamelCase_ =giou_cost # Loss coefficients lowerCamelCase_ =mask_loss_coefficient lowerCamelCase_ =dice_loss_coefficient lowerCamelCase_ =bbox_loss_coefficient lowerCamelCase_ =giou_loss_coefficient 
lowerCamelCase_ =eos_coefficient lowerCamelCase_ =focal_alpha lowerCamelCase_ =disable_custom_kernels super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def lowercase__ ( self ): """simple docstring""" return self.d_model def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: lowerCamelCase_ =self.backbone_config.to_dict() lowerCamelCase_ =self.__class__.model_type return output
6
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =ShapEImgaImgPipeline lowercase : Dict =['image'] lowercase : str =['image'] lowercase : int =[ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] lowercase : int =False @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self ): """simple docstring""" return 8 @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, ) lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =CLIPImageProcessor( crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, ) return image_processor @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowerCamelCase_ =PriorTransformer(**lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowerCamelCase_ =ShapERenderer(**lowerCAmelCase ) return model def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.dummy_prior lowerCamelCase_ =self.dummy_image_encoder lowerCamelCase_ =self.dummy_image_processor lowerCamelCase_ =self.dummy_renderer lowerCamelCase_ =HeunDiscreteScheduler( beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, ) lowerCamelCase_ ={ '''prior''': prior, '''image_encoder''': image_encoder, 
'''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) lowerCamelCase_ =output.images[0] lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowerCamelCase_ =np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase__ ( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =torch_device == '''cpu''' lowerCamelCase_ =True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =1 lowerCamelCase_ =2 lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) for key in inputs.keys(): if key in self.batch_params: lowerCamelCase_ =batch_size * [inputs[key]] lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowerCamelCase_ =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) lowerCamelCase_ =pipe( lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
6
1
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check that the card number starts with a valid issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Validate the number with the Luhn checksum algorithm."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Run all validity checks and print a diagnostic message."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
6
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return how often each total occurs when rolling `dice_number` dice
    with `sides_number` faces each."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
6
1
'''simple docstring''' import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=4, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_attention_mask lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_choices def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =None if self.use_attention_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowerCamelCase_ =BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCAmelCase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs lowerCamelCase_ ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs lowerCamelCase_ =True lowerCamelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class __UpperCamelCase ( 
lowerCamelCase__ , unittest.TestCase ): lowercase : Tuple =True lowercase : int =( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaxBertModelTester(self ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaxBertModel.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ =model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase )
6
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

# Alias names restored to distinct identifiers so later assignments do not
# overwrite earlier ones; the original names were mangled by the style transform.
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
6
1
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES a_ : List[Any] = logging.get_logger(__name__) a_ : Union[str, Any] = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) a_ : Optional[Any] = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) a_ : str = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) a_ : str = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) a_ : int = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) a_ : Union[str, Any] = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) a_ : Optional[int] = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) a_ : Tuple = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) a_ : Optional[int] = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) a_ : List[Any] = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) a_ : Optional[int] = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), 
("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) a_ : Union[str, Any] = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) a_ : List[str] = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) a_ : Dict = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) a_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) a_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) a_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) a_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) a_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) a_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) a_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) a_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) a_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) a_ : int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) a_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) a_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) a_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) a_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Union[str, Any] =FLAX_MODEL_MAPPING a_ : List[Any] = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Tuple =FLAX_MODEL_FOR_PRETRAINING_MAPPING a_ : int = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Optional[int] =FLAX_MODEL_FOR_CAUSAL_LM_MAPPING a_ : Tuple = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : str =FLAX_MODEL_FOR_MASKED_LM_MAPPING a_ : Tuple = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Union[str, Any] =FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a_ : int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : str =FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING a_ : Union[str, Any] = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : 
List[str] =FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING a_ : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Optional[Any] =FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING a_ : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : int =FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING a_ : Union[str, Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : str =FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING a_ : List[Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Optional[int] =FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING a_ : str = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Dict =FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING a_ : Union[str, Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class __UpperCamelCase ( _BaseAutoModelClass ): lowercase : Optional[Any] =FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING a_ : Tuple = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
6
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Anneal from `start_temperate` down to `threshold_temp`, accepting worse
    neighbors with probability e^(change / temperature)."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
6
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
6
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( __snake_case : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def a_ ( __snake_case : List[Any] ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict: """simple docstring""" lowerCamelCase_ ={} for old_key in state_dict.keys(): lowerCamelCase_ =old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' ) else: lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCamelCase_ =state_dict[old_key] return new_dict def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =0 os.makedirs(__snake_case , exist_ok=__snake_case ) for expert in range(__snake_case ): lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(__snake_case ): lowerCamelCase_ =torch.load(__snake_case )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =os.path.join( __snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) torch.save(__snake_case , __snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__snake_case )[0]].dtype ) # Add the last block lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__snake_case ) == 1: 
lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) torch.save(__snake_case , __snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__snake_case , __snake_case ) # Otherwise, let's build the index lowerCamelCase_ ={} for idx, shard in enumerate(__snake_case ): lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' ) lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) for key in shard: lowerCamelCase_ =shard_file # Add the metadata lowerCamelCase_ ={'''total_size''': total_size} lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) return metadata, index if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a_ : Tuple = parser.parse_args() a_ , a_ : int = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_28, args.dtype, ) a_ : Tuple = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
6
1
'''simple docstring''' import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __UpperCamelCase ( lowerCamelCase__ ): lowercase : BigBirdConfig lowercase : jnp.dtype =jnp.floataa lowercase : bool =True def lowercase__ ( self ): """simple docstring""" super().setup() lowerCamelCase_ =nn.Dense(5, dtype=self.dtype ) def __call__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =super().__call__(*lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =FlaxBigBirdForNaturalQuestionsModule def a_ ( __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Tuple , __snake_case : List[Any] ) -> int: """simple docstring""" def cross_entropy(__snake_case : Optional[Any] , __snake_case : Any , __snake_case : Union[str, Any]=None ): lowerCamelCase_ =logits.shape[-1] lowerCamelCase_ =(labels[..., None] == jnp.arange(__snake_case )[None]).astype('''f4''' ) lowerCamelCase_ =jax.nn.log_softmax(__snake_case , axis=-1 ) lowerCamelCase_ =-jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCamelCase_ =reduction(__snake_case ) return loss lowerCamelCase_ =partial(__snake_case , reduction=jnp.mean ) lowerCamelCase_ =cross_entropy(__snake_case , __snake_case ) lowerCamelCase_ =cross_entropy(__snake_case , __snake_case ) lowerCamelCase_ =cross_entropy(__snake_case , __snake_case ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __UpperCamelCase : lowercase : str ="google/bigbird-roberta-base" lowercase : int =30_00 lowercase : int =1_05_00 lowercase : int =1_28 lowercase : int =3 lowercase : int =1 lowercase : int =5 # tx_args lowercase : float =3E-5 lowercase : float =0.0 lowercase : int =2_00_00 lowercase : float =0.00_95 lowercase : str ="bigbird-roberta-natural-questions" lowercase : str ="training-expt" lowercase : str ="data/nq-training.jsonl" lowercase : str ="data/nq-validation.jsonl" def lowercase__ ( self ): """simple docstring""" os.makedirs(self.base_dir, exist_ok=lowerCAmelCase ) lowerCamelCase_ =os.path.join(self.base_dir, self.save_dir ) lowerCamelCase_ =self.batch_size_per_device * jax.device_count() @dataclass class __UpperCamelCase : lowercase : int lowercase : int =40_96 # no dynamic padding on TPUs def __call__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.collate_fn(lowerCAmelCase ) lowerCamelCase_ =jax.tree_util.tree_map(lowerCAmelCase, lowerCAmelCase ) return batch def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.fetch_inputs(features['''input_ids'''] ) lowerCamelCase_ ={ '''input_ids''': jnp.array(lowerCAmelCase, dtype=jnp.intaa ), '''attention_mask''': jnp.array(lowerCAmelCase, dtype=jnp.intaa ), '''start_labels''': jnp.array(features['''start_token'''], dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''], 
dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''], dtype=jnp.intaa ), } return batch def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[self._fetch_inputs(lowerCAmelCase ) for ids in input_ids] return zip(*lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[1 for _ in range(len(lowerCAmelCase ) )] while len(lowerCAmelCase ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def a_ ( __snake_case : List[Any] , __snake_case : int , __snake_case : Any=None ) -> Tuple: """simple docstring""" if seed is not None: lowerCamelCase_ =dataset.shuffle(seed=__snake_case ) for i in range(len(__snake_case ) // batch_size ): lowerCamelCase_ =dataset[i * batch_size : (i + 1) * batch_size] yield dict(__snake_case ) @partial(jax.pmap , axis_name='''batch''' ) def a_ ( __snake_case : Any , __snake_case : Dict , **__snake_case : Dict ) -> Optional[Any]: """simple docstring""" def loss_fn(__snake_case : Any ): lowerCamelCase_ =model_inputs.pop('''start_labels''' ) lowerCamelCase_ =model_inputs.pop('''end_labels''' ) lowerCamelCase_ =model_inputs.pop('''pooled_labels''' ) lowerCamelCase_ =state.apply_fn(**__snake_case , params=__snake_case , dropout_rng=__snake_case , train=__snake_case ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =outputs return state.loss_fn( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) lowerCamelCase_, lowerCamelCase_ =jax.random.split(__snake_case ) lowerCamelCase_ =jax.value_and_grad(__snake_case ) lowerCamelCase_, lowerCamelCase_ =grad_fn(state.params ) lowerCamelCase_ =jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) lowerCamelCase_ =jax.lax.pmean(__snake_case , '''batch''' ) lowerCamelCase_ =state.apply_gradients(grads=__snake_case ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='''batch''' ) def a_ ( __snake_case : Union[str, Any] , **__snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =model_inputs.pop('''start_labels''' ) lowerCamelCase_ =model_inputs.pop('''end_labels''' ) lowerCamelCase_ =model_inputs.pop('''pooled_labels''' ) lowerCamelCase_ =state.apply_fn(**__snake_case , params=state.params , train=__snake_case ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =outputs lowerCamelCase_ =state.loss_fn(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) lowerCamelCase_ =jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) return metrics class __UpperCamelCase ( train_state.TrainState ): lowercase : Callable =struct.field(pytree_node=lowerCamelCase__ ) @dataclass class __UpperCamelCase : lowercase : Args lowercase : Callable lowercase : Callable lowercase : Callable lowercase : Callable lowercase : wandb lowercase : Callable =None def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =model.params lowerCamelCase_ =TrainState.create( apply_fn=model.__call__, params=lowerCAmelCase, tx=lowerCAmelCase, loss_fn=lowerCAmelCase, ) if ckpt_dir is not None: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =restore_checkpoint(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ ={ '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': args.weight_decay, } 
lowerCamelCase_, lowerCamelCase_ =build_tx(**lowerCAmelCase ) lowerCamelCase_ =train_state.TrainState( step=lowerCAmelCase, apply_fn=model.__call__, params=lowerCAmelCase, tx=lowerCAmelCase, opt_state=lowerCAmelCase, ) lowerCamelCase_ =args lowerCamelCase_ =data_collator lowerCamelCase_ =lr lowerCamelCase_ =params lowerCamelCase_ =jax_utils.replicate(lowerCAmelCase ) return state def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.args lowerCamelCase_ =len(lowerCAmelCase ) // args.batch_size lowerCamelCase_ =jax.random.PRNGKey(0 ) lowerCamelCase_ =jax.random.split(lowerCAmelCase, jax.device_count() ) for epoch in range(args.max_epochs ): lowerCamelCase_ =jnp.array(0, dtype=jnp.floataa ) lowerCamelCase_ =get_batched_dataset(lowerCAmelCase, args.batch_size, seed=lowerCAmelCase ) lowerCamelCase_ =0 for batch in tqdm(lowerCAmelCase, total=lowerCAmelCase, desc=f'''Running EPOCH-{epoch}''' ): lowerCamelCase_ =self.data_collator(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =self.train_step_fn(lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: lowerCamelCase_ =jax_utils.unreplicate(state.step ) lowerCamelCase_ =running_loss.item() / i lowerCamelCase_ =self.scheduler_fn(state_step - 1 ) lowerCamelCase_ =self.evaluate(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ ={ '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(lowerCAmelCase ) ) self.logger.log(lowerCAmelCase, commit=lowerCAmelCase ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''', state=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =get_batched_dataset(lowerCAmelCase, self.args.batch_size ) lowerCamelCase_ =len(lowerCAmelCase ) // self.args.batch_size lowerCamelCase_ =jnp.array(0, dtype=jnp.floataa ) lowerCamelCase_ =0 for batch in tqdm(lowerCAmelCase, total=lowerCAmelCase, desc='''Evaluating ... ''' ): lowerCamelCase_ =self.data_collator(lowerCAmelCase ) lowerCamelCase_ =self.val_step_fn(lowerCAmelCase, **lowerCAmelCase ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =jax_utils.unreplicate(lowerCAmelCase ) print(f'''SAVING CHECKPOINT IN {save_dir}''', end=''' ... ''' ) self.model_save_fn(lowerCAmelCase, params=state.params ) with open(os.path.join(lowerCAmelCase, '''opt_state.msgpack''' ), '''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args, os.path.join(lowerCAmelCase, '''args.joblib''' ) ) joblib.dump(self.data_collator, os.path.join(lowerCAmelCase, '''data_collator.joblib''' ) ) with open(os.path.join(lowerCAmelCase, '''training_state.json''' ), '''w''' ) as f: json.dump({'''step''': state.step.item()}, lowerCAmelCase ) print('''DONE''' ) def a_ ( __snake_case : Tuple , __snake_case : List[str] ) -> Union[str, Any]: """simple docstring""" print(F'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... 
''' ) with open(os.path.join(__snake_case , '''flax_model.msgpack''' ) , '''rb''' ) as f: lowerCamelCase_ =from_bytes(state.params , f.read() ) with open(os.path.join(__snake_case , '''opt_state.msgpack''' ) , '''rb''' ) as f: lowerCamelCase_ =from_bytes(state.opt_state , f.read() ) lowerCamelCase_ =joblib.load(os.path.join(__snake_case , '''args.joblib''' ) ) lowerCamelCase_ =joblib.load(os.path.join(__snake_case , '''data_collator.joblib''' ) ) with open(os.path.join(__snake_case , '''training_state.json''' ) , '''r''' ) as f: lowerCamelCase_ =json.load(__snake_case ) lowerCamelCase_ =training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def a_ ( __snake_case : List[str] , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : str ) -> Tuple: """simple docstring""" lowerCamelCase_ =num_train_steps - warmup_steps lowerCamelCase_ =optax.linear_schedule(init_value=__snake_case , end_value=__snake_case , transition_steps=__snake_case ) lowerCamelCase_ =optax.linear_schedule(init_value=__snake_case , end_value=1e-7 , transition_steps=__snake_case ) lowerCamelCase_ =optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def a_ ( __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : str ) -> Dict: """simple docstring""" def weight_decay_mask(__snake_case : Optional[int] ): lowerCamelCase_ =traverse_util.flatten_dict(__snake_case ) lowerCamelCase_ ={k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(__snake_case ) lowerCamelCase_ =scheduler_fn(__snake_case , __snake_case , __snake_case , __snake_case ) lowerCamelCase_ =optax.adamw(learning_rate=__snake_case , weight_decay=__snake_case , mask=__snake_case ) return tx, lr
6
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int =['image_processor', 'tokenizer'] lowercase : int ='LayoutLMv2ImageProcessor' lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ =features['''words'''] lowerCamelCase_ =self.tokenizer( text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, ) # add pixel values lowerCamelCase_ =features.pop('''pixel_values''' ) if return_overflowing_tokens is True: lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] ) 
lowerCamelCase_ =images return encoded_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' ) return images_with_overflow def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
6
1
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
6
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =VQModel lowercase : Union[str, Any] ='sample' @property def lowercase__ ( self, lowerCAmelCase=(32, 32) ): """simple docstring""" lowerCamelCase_ =4 lowerCamelCase_ =3 lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) return {"sample": image} @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={ '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 3, } lowerCamelCase_ =self.dummy_input return init_dict, inputs_dict def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ), 0 ) model.to(lowerCAmelCase ) lowerCamelCase_ =model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(lowerCAmelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size ) lowerCamelCase_ =image.to(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).sample lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
6
1
'''simple docstring''' import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed a_ : Union[str, Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""): from run_translation import main # noqa set_seed(42) a_ : Tuple = """sshleifer/student_marian_en_ro_6_1""" a_ : Dict = """sshleifer/tiny-mbart""" @require_torch class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self, lowerCAmelCase=False, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, ): """simple docstring""" lowerCamelCase_ =self.run_trainer( eval_steps=1, max_len=12, model_name=lowerCAmelCase, num_train_epochs=1, distributed=lowerCAmelCase, extra_args_str=lowerCAmelCase, predict_with_generate=lowerCAmelCase, do_train=lowerCAmelCase, do_eval=lowerCAmelCase, do_predict=lowerCAmelCase, ) lowerCamelCase_ =TrainerState.load_from_json(os.path.join(lowerCAmelCase, '''trainer_state.json''' ) ).log_history if not do_eval: return lowerCamelCase_ =[log for log in logs if '''eval_loss''' in log.keys()] lowerCamelCase_ =eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats lowerCamelCase_ =eval_metrics[-1] assert isinstance(last_step_stats['''eval_bleu'''], lowerCAmelCase ) assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCAmelCase ) @require_torch_multi_gpu def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCAmelCase, extra_args_str='''--sharded_ddp simple''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCAmelCase, extra_args_str='''--sharded_ddp simple --fp16''' ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCAmelCase, extra_args_str='''--sharded_ddp zero_dp_2''', predict_with_generate=lowerCAmelCase ) @unittest.skip('''Requires an update of the env running those tests''' ) @require_torch_multi_gpu @require_fairscale def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick( distributed=lowerCAmelCase, extra_args_str='''--sharded_ddp zero_dp_2 --fp16''', predict_with_generate=lowerCAmelCase ) @require_apex @require_torch_gpu def lowercase__ ( self ): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCAmelCase, 
extra_args_str='''--fp16 --fp16_backend=apex''' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=lowerCAmelCase, extra_args_str='''--fp16 --fp16_backend=apex''' ) @parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] ) @require_torch_multi_gpu def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={ # test with the default log_level - should be info and thus log info once '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1}, # test with high log_level and log_level_replica - should be quiet on all processes '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0}, } lowerCamelCase_ =experiments[experiment_id] lowerCamelCase_ ={'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False} lowerCamelCase_ ='''Running training''' with CaptureStderr() as cl: self.run_seqaseq_quick(**lowerCAmelCase, extra_args_str=data['''extra_args_str'''] ) lowerCamelCase_ =len(re.findall(lowerCAmelCase, cl.err ) ) self.assertEqual(lowerCAmelCase, data['''n_matches'''] ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.run_trainer( eval_steps=2, max_len=128, model_name=lowerCAmelCase, learning_rate=3e-4, num_train_epochs=10, distributed=lowerCAmelCase, ) # Check metrics lowerCamelCase_ =TrainerState.load_from_json(os.path.join(lowerCAmelCase, '''trainer_state.json''' ) ).log_history lowerCamelCase_ =[log for log in logs if '''eval_loss''' in log.keys()] lowerCamelCase_ =eval_metrics[0] lowerCamelCase_ =eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['''eval_bleu'''], lowerCAmelCase ) # test if do_predict saves generations and metrics lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ ={os.path.basename(lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def lowercase__ ( self ): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(lowerCAmelCase ) -> Tuple[int, float]: lowerCamelCase_ ='''--skip_memory_metrics 0''' lowerCamelCase_ =self.run_trainer( max_len=128, model_name=lowerCAmelCase, learning_rate=3e-4, num_train_epochs=1, optim=lowerCAmelCase, distributed=lowerCAmelCase, extra_args_str=lowerCAmelCase, do_eval=lowerCAmelCase, do_predict=lowerCAmelCase, n_gpus_to_use=1, ) # Check metrics lowerCamelCase_ =TrainerState.load_from_json(Path(lowerCAmelCase, '''trainer_state.json''' ) ).log_history lowerCamelCase_ =int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 ) lowerCamelCase_ =int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 ) lowerCamelCase_ =logs[0]['''train_loss'''] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) 
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )

        lowerCamelCase_ =gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        lowerCamelCase_ =gpu_peak_mem_orig + gpu_alloc_mem_orig
        lowerCamelCase_ =gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        lowerCamelCase_ =gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        lowerCamelCase_ =120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            lowerCAmelCase, lowerCAmelCase, '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''', )

        self.assertGreater(
            lowerCAmelCase, lowerCAmelCase, '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''', )

        self.assertEqual(
            lowerCAmelCase, lowerCAmelCase, f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 3e-3, lowerCAmelCase = "adafactor", lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = None, ):
        """simple docstring"""
        lowerCamelCase_ =self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        lowerCamelCase_ =self.get_auto_remove_tmp_dir()
        lowerCamelCase_ =f'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(lowerCAmelCase )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(lowerCAmelCase )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        lowerCamelCase_ =f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(lowerCAmelCase )}
        '''.split()
        lowerCamelCase_ ='''
            --do_predict
        '''.split()
lowerCamelCase_ =[] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: lowerCamelCase_ =get_gpu_count() lowerCamelCase_ =get_torch_dist_unique_port() lowerCamelCase_ =f''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() lowerCamelCase_ =[sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCAmelCase, env=self.get_env() ) else: lowerCamelCase_ =['''run_translation.py'''] + args with patch.object(lowerCAmelCase, '''argv''', lowerCAmelCase ): main() return output_dir
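# --- Worked check of the memory arithmetic in the comments of the BNB test
# above (a standalone sketch, not part of the test itself). 54M params minus
# the 29M fp32 `nn.Embedding` leaves ~25M params whose optimizer state shrinks
# from 8 bytes to 2 bytes per param with bitsandbytes.
quantized_params_in_millions = 25
adam_bytes_per_param = 8  # two fp32 optimizer states per param
bnb_bytes_per_param = 2   # 8-bit optimizer states per param
expected_saving_mb = quantized_params_in_millions * (adam_bytes_per_param - bnb_bytes_per_param)
assert expected_saving_mb == 150  # matches the ~150MB figure; 120MB is the tested margin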
6
'''simple docstring'''
import datasets

from .evaluate import evaluate


a_ : List[Any] = """\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
"""

a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal
contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when
reviewing contracts in connection with corporate transactions.
"""

a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': list of possible texts for the answer, as a list of strings
          depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the CUAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'aupr': Area Under the Precision-Recall curve
    'prec_at_80_recall': Precision at 80% recall
    'prec_at_90_recall': Precision at 90% recall
Examples:
    >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    def lowercase__ ( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': {
                        '''id''': datasets.Value('''string''' ),
                        '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
                    },
                    '''references''': {
                        '''id''': datasets.Value('''string''' ),
                        '''answers''': datasets.features.Sequence(
                            {
                                '''text''': datasets.Value('''string''' ),
                                '''answer_start''': datasets.Value('''int32''' ),
                            } ),
                    },
                } ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ ={prediction['''id''']:
prediction['''prediction_text'''] for prediction in predictions} lowerCamelCase_ =[ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase ) return score
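# --- Illustrative sketch of the first transformation in `compute` above: the
# predictions list is flattened into an id -> candidate-texts mapping before
# being handed to the CUAD `evaluate` script. Toy values only.
toy_predictions = [{"id": "q1", "prediction_text": ["The seller:", "The buyer"]}]
toy_pred_dict = {p["id"]: p["prediction_text"] for p in toy_predictions}
assert toy_pred_dict == {"q1": ["The seller:", "The buyer"]}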
6
1
'''simple docstring'''
import re
import time
from typing import Optional

import IPython.display as disp

from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length


def a_ ( __snake_case : List[Any] ) -> Optional[Any]:
    """simple docstring"""
    lowerCamelCase_ =int(__snake_case )
    lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =t // 3600, (t // 60) % 60, t % 60
    return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''


def a_ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Optional[int] , __snake_case : Optional[Any]=300 ) -> Optional[int]:
    """simple docstring"""
    # docstyle-ignore
    return F'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''


def a_ ( __snake_case : str ) -> List[str]:
    """simple docstring"""
    lowerCamelCase_ ='''<table border="1" class="dataframe">\n'''
    html_code += """ <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F''' <th>{i}</th>\n'''
    html_code += " </tr>\n </thead>\n <tbody>\n"
    for line in items[1:]:
        html_code += " <tr>\n"
        for elt in line:
            lowerCamelCase_ =F'''{elt:.6f}''' if isinstance(__snake_case , __snake_case ) else str(__snake_case )
            html_code += F''' <td>{elt}</td>\n'''
        html_code += " </tr>\n"
    html_code += " </tbody>\n</table><p>"
    return html_code


class __UpperCamelCase :
    lowercase : Tuple =5
    lowercase : str =0.2

    def __init__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 300, ):
        """simple docstring"""
        lowerCamelCase_ =total
        lowerCamelCase_ ='''''' if prefix is None else prefix
        lowerCamelCase_ =leave
        lowerCamelCase_ =parent
        lowerCamelCase_ =width
        lowerCamelCase_ =None
        lowerCamelCase_ =None
        lowerCamelCase_ =None

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = False, lowerCAmelCase = None ):
        """simple docstring"""
        lowerCamelCase_ =value
        if comment is not None:
            lowerCamelCase_ =comment
        if self.last_value is None:
            lowerCamelCase_ =lowerCamelCase_ =time.time()
            lowerCamelCase_ =lowerCamelCase_ =value
            lowerCamelCase_ =lowerCamelCase_ =None
            lowerCamelCase_ =self.warmup
            lowerCamelCase_ =1
            self.update_bar(lowerCAmelCase )
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total ):
            if self.first_calls > 0:
                self.first_calls -= 1
            lowerCamelCase_ =time.time()
            lowerCamelCase_ =current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value: lowerCamelCase_ =self.elapsed_time / (value - self.start_value) else: lowerCamelCase_ =None if value >= self.total: lowerCamelCase_ =self.total lowerCamelCase_ =None if not self.leave: self.close() elif self.average_time_per_item is not None: lowerCamelCase_ =self.average_time_per_item * (self.total - value) self.update_bar(lowerCAmelCase ) lowerCamelCase_ =value lowerCamelCase_ =current_time if self.average_time_per_item is None: lowerCamelCase_ =1 else: lowerCamelCase_ =max(int(self.update_every / self.average_time_per_item ), 1 ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =''' ''' * (len(str(self.total ) ) - len(str(lowerCAmelCase ) )) + str(lowerCAmelCase ) if self.elapsed_time is None: lowerCamelCase_ =f'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: lowerCamelCase_ =f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: lowerCamelCase_ =( f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' f''' {format_time(self.predicted_remaining )}''' ) self.label += f''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else f''', {self.comment}]''' self.display() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowerCamelCase_ =disp.display(disp.HTML(self.html_code ), display_id=lowerCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def lowercase__ ( self ): """simple docstring""" if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" super().__init__(lowerCAmelCase ) lowerCamelCase_ =None if column_names is None else [column_names] lowerCamelCase_ =None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =html_progress_bar(self.value, self.total, self.prefix, self.label, self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowerCamelCase_ =disp.display(disp.HTML(self.html_code ), display_id=lowerCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if self.inner_table is None: lowerCamelCase_ =[list(values.keys() ), list(values.values() )] else: lowerCamelCase_ =self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(lowerCAmelCase ) lowerCamelCase_ =columns self.inner_table.append([values[c] for c in columns] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=300 ): """simple docstring""" lowerCamelCase_ =NotebookProgressBar(lowerCAmelCase, prefix=lowerCAmelCase, parent=self, width=lowerCAmelCase ) return self.child_bar def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =None self.display() class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self ): """simple docstring""" lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =False def 
lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ ='''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
        lowerCamelCase_ =0
        lowerCamelCase_ =0
        lowerCamelCase_ =[self.first_column] + ['''Training Loss''']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('''Validation Loss''' )
        lowerCamelCase_ =NotebookTrainingTracker(state.max_steps, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =int(state.epoch ) if int(state.epoch ) == state.epoch else f'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1, comment=f'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
        lowerCamelCase_ =False

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        if not has_length(lowerCAmelCase ):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                lowerCamelCase_ =self.training_tracker.add_child(len(lowerCAmelCase ) )
            else:
                lowerCamelCase_ =NotebookProgressBar(len(lowerCAmelCase ) )
            self.prediction_bar.update(1 )
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1 )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        lowerCamelCase_ =None

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            lowerCamelCase_ ={'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
            lowerCamelCase_ =state.global_step
            self.training_tracker.write_line(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        if self.training_tracker is not None:
            lowerCamelCase_ ={'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
            for log in reversed(state.log_history ):
                if "loss" in log:
                    lowerCamelCase_ =log['''loss''']
                    break
            if self.first_column == "Epoch":
                lowerCamelCase_ =int(state.epoch )
            else:
                lowerCamelCase_ =state.global_step
            lowerCamelCase_ ='''eval'''
            for k in metrics:
                if k.endswith('''_loss''' ):
                    lowerCamelCase_ =re.sub(R'''\_loss$''', '''''', lowerCAmelCase )
            lowerCamelCase_ =metrics.pop('''total_flos''', lowerCAmelCase )
            lowerCamelCase_ =metrics.pop('''epoch''', lowerCAmelCase )
            lowerCamelCase_ =metrics.pop(f'''{metric_key_prefix}_runtime''', lowerCAmelCase )
            lowerCamelCase_ =metrics.pop(f'''{metric_key_prefix}_samples_per_second''', lowerCAmelCase )
            lowerCamelCase_ =metrics.pop(f'''{metric_key_prefix}_steps_per_second''', lowerCAmelCase )
            lowerCamelCase_ =metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''', lowerCAmelCase )
            for k, v in metrics.items():
                if k == f'''{metric_key_prefix}_loss''':
                    lowerCamelCase_ =v
                else:
                    lowerCamelCase_ =k.split('''_''' )
                    lowerCamelCase_ =''' '''.join([part.capitalize() for part in splits[1:]] )
                    lowerCamelCase_ =v
            self.training_tracker.write_line(lowerCAmelCase )
            self.training_tracker.remove_child()
            lowerCamelCase_ =None
            # Evaluation takes a long time so we should force the next update.
lowerCamelCase_ =True def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" self.training_tracker.update( state.global_step, comment=f'''Epoch {int(state.epoch )}/{state.num_train_epochs}''', force_update=lowerCAmelCase ) lowerCamelCase_ =None
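# --- Illustrative sketch of the `format_time` helper used by the progress bar
# above, calling it by the name used at the call sites inside the class.
assert format_time(3661) == "1:01:01"  # hours branch: h:mm:ss
assert format_time(61) == "01:01"      # under an hour: mm:ss only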
6
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a_ : Tuple = logging.get_logger(__name__) a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : Tuple = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : Union[str, Any] = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : int = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } a_ : List[Any] = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } a_ : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } a_ : Optional[int] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : Dict = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =VOCAB_FILES_NAMES lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : Dict =DPRContextEncoderTokenizer class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : List[Any] =DPRQuestionEncoderTokenizer a_ : Union[str, Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) a_ : Dict = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
    def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
        """simple docstring"""
        if titles is None and texts is None:
            return super().__call__(
                lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
        elif titles is None or texts is None:
            lowerCamelCase_ =titles if texts is None else texts
            return super().__call__(
                lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
        lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
        lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
        lowerCamelCase_ =len(lowerCAmelCase )
        lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
        assert len(lowerCAmelCase ) == len(
            lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
        lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
        lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
        lowerCamelCase_ ={
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
            ]
        }
        if return_attention_mask is not False:
            lowerCamelCase_ =[]
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            lowerCamelCase_ =attention_mask
        return self.pad(lowerCAmelCase,
padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ): """simple docstring""" lowerCamelCase_ =reader_input['''input_ids'''] lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ ) lowerCamelCase_ =[] for doc_id in sorted_docs: lowerCamelCase_ =list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase_ =sequence_ids.index(self.pad_token_id ) else: lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =[] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase ) lowerCamelCase_ =[] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' lowerCamelCase_ =end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : int =VOCAB_FILES_NAMES lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION lowercase : int =['input_ids', 'attention_mask'] lowercase : Dict =DPRReaderTokenizer
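# --- Standalone sketch of the best-span search implemented in
# `_get_best_spans` above, run on toy logits (not the class method itself).
start_logits = [0.1, 2.0, 0.3]
end_logits = [0.2, 0.1, 1.5]
max_answer_length = 2
candidate_spans = []
for start, start_score in enumerate(start_logits):
    for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
        candidate_spans.append(((start, start + length), start_score + end_score))
best_span, best_score = max(candidate_spans, key=lambda item: item[1])
assert best_span == (1, 2) and abs(best_score - 3.5) < 1e-9  # start=1, end=2 wins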
6
1
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase ) @torch.no_grad() def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ): """simple docstring""" if isinstance(self.unet.config.sample_size, lowerCAmelCase ): lowerCamelCase_ =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase_ =self.scheduler.step( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase ).prev_sample lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 ) lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase )
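# --- Hedged usage sketch (commented out because it downloads weights).
# "google/ddpm-cifar10-32" is one public checkpoint this pipeline is commonly
# run with; eta=0.0 keeps DDIM sampling deterministic for a fixed generator.
# from diffusers import DDIMPipeline
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
# image.save("sample.png")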
6
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase_ ={ '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } lowerCamelCase_ =Dataset.from_dict(__snake_case ) return dataset class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ), 2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase ) self.assertEqual(len(lowerCAmelCase ), 2 ) print(lowerCAmelCase ) self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
6
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor a_ : Dict = logging.get_logger(__name__) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''', lowerCAmelCase, ) super().__init__(*lowerCAmelCase, **lowerCAmelCase )
6
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a_ : Any = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
1
'''simple docstring''' class __UpperCamelCase : def __init__( self, lowerCAmelCase = "", lowerCAmelCase = False ): """simple docstring""" lowerCamelCase_ ={} # A node will be a leaf if the tree contains its word lowerCamelCase_ =is_leaf lowerCamelCase_ =prefix def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =0 for q, w in zip(self.prefix, lowerCAmelCase ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" for word in words: self.insert(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if self.prefix == word: lowerCamelCase_ =True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: lowerCamelCase_ =RadixNode(prefix=lowerCAmelCase, is_leaf=lowerCAmelCase ) else: lowerCamelCase_ =self.nodes[word[0]] lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =incoming_node.match( lowerCAmelCase ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowerCAmelCase ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: lowerCamelCase_ =remaining_prefix lowerCamelCase_ =self.nodes[matching_string[0]] lowerCamelCase_ =RadixNode(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =aux_node if remaining_word == "": lowerCamelCase_ =True else: self.nodes[matching_string[0]].insert(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.nodes.get(word[0], lowerCAmelCase ) if not incoming_node: return False else: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =incoming_node.match( lowerCAmelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.nodes.get(word[0], lowerCAmelCase ) if not incoming_node: return False else: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =incoming_node.match( lowerCAmelCase ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowerCAmelCase ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: lowerCamelCase_ =list(self.nodes.values() )[0] lowerCamelCase_ =merging_node.is_leaf self.prefix += merging_node.prefix lowerCamelCase_ =merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: lowerCamelCase_ =False # If there is 1 edge, we merge it with its child else: lowerCamelCase_ =list(incoming_node.nodes.values() )[0] lowerCamelCase_ =merging_node.is_leaf incoming_node.prefix += merging_node.prefix lowerCamelCase_ 
=merging_node.nodes return True def lowercase__ ( self, lowerCAmelCase = 0 ): """simple docstring""" if self.prefix != "": print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' ) for value in self.nodes.values(): value.print_tree(height + 1 ) def a_ ( ) -> bool: """simple docstring""" lowerCamelCase_ ='''banana bananas bandana band apple all beast'''.split() lowerCamelCase_ =RadixNode() root.insert_many(__snake_case ) assert all(root.find(__snake_case ) for word in words ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def a_ ( ) -> None: """simple docstring""" assert test_trie() def a_ ( ) -> None: """simple docstring""" lowerCamelCase_ =RadixNode() lowerCamelCase_ ='''banana bananas bandanas bandana band apple all beast'''.split() root.insert_many(__snake_case ) print('''Words:''' , __snake_case ) print('''Tree:''' ) root.print_tree() if __name__ == "__main__": main()
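# --- Illustrative sketch of the prefix-splitting done by `match` above,
# calling it by the name used at the call sites inside the class.
node = RadixNode(prefix="banana")
matching, remaining_prefix, remaining_word = node.match("band")
# "ban" is shared, "ana" stays on the node, "d" is what is left of the word
assert (matching, remaining_prefix, remaining_word) == ("ban", "ana", "d")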
6
'''simple docstring''' from collections import defaultdict from math import gcd def a_ ( __snake_case : int = 150_0000 ) -> int: """simple docstring""" lowerCamelCase_ =defaultdict(__snake_case ) lowerCamelCase_ =2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ): if gcd(__snake_case , __snake_case ) > 1: continue lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(__snake_case , limit + 1 , __snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"""{solution() = }""")
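# --- Worked example of Euclid's formula used above (a sketch, not part of the
# solver). Coprime m=2, n=1 with opposite parity yields the primitive triple
# (3, 4, 5), whose perimeter equals the 2*m*(m + n) expression in the loop.
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5)
assert a + b + c == 2 * m * (m + n)  # the perimeter formula used by the solver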
6
1
'''simple docstring'''
from pathlib import Path

import cva
import numpy as np
from matplotlib import pyplot as plt


def a_ ( __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : int , __snake_case : int ) -> np.ndarray:
    """simple docstring"""
    lowerCamelCase_ =cva.getAffineTransform(__snake_case , __snake_case )
    return cva.warpAffine(__snake_case , __snake_case , (rows, cols) )


if __name__ == "__main__":
    # read original image
    a_ : List[str] = cva.imread(
        str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
    )
    # convert the image to gray scale values
    a_ : Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY)

    # get image shape
    a_ , a_ : str = gray_img.shape

    # set different points to rotate image
    a_ : Tuple = np.array([[50, 50], [2_00, 50], [50, 2_00]], np.floataa)
    a_ : Optional[Any] = np.array([[10, 1_00], [2_00, 50], [1_00, 2_50]], np.floataa)
    a_ : Optional[int] = np.array([[50, 50], [1_50, 50], [1_20, 2_00]], np.floataa)
    a_ : List[Any] = np.array([[10, 1_00], [80, 50], [1_80, 2_50]], np.floataa)

    # add all rotated images to a list
    a_ : List[Any] = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]

    # plot different image rotations
    a_ : List[Any] = plt.figure(1)
    a_ : List[Any] = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
        plt.title(titles[i])
        plt.axis("""off""")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
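# --- Illustrative sketch of what `cva.getAffineTransform` solves for, assuming
# `cva` refers to OpenCV (`cv2`): three point correspondences fully determine a
# 2x3 affine matrix. A pure translation by (1, 1) gives an identity linear
# part plus a translation column.
src_pts = np.array([[0, 0], [1, 0], [0, 1]], np.float32)
dst_pts = np.array([[1, 1], [2, 1], [1, 2]], np.float32)
affine_matrix = cva.getAffineTransform(src_pts, dst_pts)
# affine_matrix is approximately [[1, 0, 1], [0, 1, 1]]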
6
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ : Tuple = 16 a_ : Optional[int] = 32 def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__snake_case : int ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase_ =datasets.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase_ =16 elif accelerator.mixed_precision != "no": lowerCamelCase_ =8 else: lowerCamelCase_ =None return tokenizer.pad( __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCamelCase_ =DataLoader( tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) lowerCamelCase_ =DataLoader( tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ : Tuple = mocked_dataloaders # noqa: F811 def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1": lowerCamelCase_ =2 # Initialize accelerator lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase_ =config['''lr'''] lowerCamelCase_ =int(config['''num_epochs'''] ) lowerCamelCase_ =int(config['''seed'''] ) lowerCamelCase_ =int(config['''batch_size'''] ) lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__snake_case ) def inner_training_loop(__snake_case : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase_ =model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case ) lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case ) # Instantiate scheduler lowerCamelCase_ =get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.loss accelerator.backward(__snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.logits.argmax(dim=-1 ) lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__snake_case , references=__snake_case , ) lowerCamelCase_ =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __snake_case ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowerCamelCase_ =parser.parse_args() lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
6
1
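# Example (not part of the corpus): a minimal sketch of the
# `find_executable_batch_size` pattern used in the record above, assuming
# the `accelerate` package is installed. The decorator retries the wrapped
# function with a halved batch size whenever it raises an out-of-memory
# error; the body below is a placeholder for a real training loop.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def inner_training_loop(batch_size):
    # `batch_size` is injected by the decorator; do not pass it yourself.
    print(f"attempting training with batch_size={batch_size}")

inner_training_loop()  # called with no arguments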
'''simple docstring''' import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" self.assertEqual(len(lowerCAmelCase ), len(lowerCAmelCase ) ) for a, b in zip(lowerCAmelCase, lowerCAmelCase ): self.assertAlmostEqual(lowerCAmelCase, lowerCAmelCase, delta=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(lowerCAmelCase ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step, 3 ) self.assertEqual(len(accumulator.gradients ), 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =None ops.enable_eager_execution_internal() lowerCamelCase_ =tf.config.list_physical_devices('''CPU''' ) if len(lowerCAmelCase ) == 1: tf.config.set_logical_device_configuration( physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) lowerCamelCase_ =tf.config.list_logical_devices(device_type='''CPU''' ) lowerCamelCase_ =tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): lowerCamelCase_ =GradientAccumulator() lowerCamelCase_ =tf.Variable([4.0, 3.0] ) lowerCamelCase_, lowerCamelCase_ =create_optimizer(5e-5, 10, 5 ) lowerCamelCase_ =tf.Variable([0.0, 0.0], trainable=lowerCAmelCase ) def accumulate_on_replica(lowerCAmelCase ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) ) @tf.function def accumulate(lowerCAmelCase, lowerCAmelCase ): with strategy.scope(): lowerCamelCase_ =strategy.experimental_local_results(lowerCAmelCase ) local_variables[0].assign(lowerCAmelCase ) local_variables[1].assign(lowerCAmelCase ) strategy.run(lowerCAmelCase, args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(lowerCAmelCase ) def _check_local_values(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value(), lowerCAmelCase, tol=1e-2 ) self.assertListAlmostEqual(values[1].value(), lowerCAmelCase, tol=1e-2 ) accumulate([1.0, 2.0], [-1.0, 1.0] ) accumulate([3.0, -1.0], [-1.0, -1.0] ) accumulate([-2.0, 2.0], [3.0, -2.0] ) self.assertEqual(accumulator.step, 3 ) _check_local_values([2.0, 3.0], [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step, 0 ) _check_local_values([0.0, 0.0], [0.0, 0.0] )
6
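# Example (not part of the corpus): the accumulate/reset cycle the test
# above exercises, assuming TensorFlow and a TF-enabled `transformers`.
import tensorflow as tf
from transformers import GradientAccumulator

accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])   # step 1
accumulator([tf.constant([3.0, -1.0])])  # step 2: gradients are summed
print(accumulator.step)                  # -> 2
print(accumulator.gradients[0].numpy())  # -> [4. 1.]
accumulator.reset()                      # zero the buffers, step back to 0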
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py a_ : List[str] = """src/diffusers""" # Matches is_xxx_available() a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") a_ : Optional[Any] = """ {0} = None """ a_ : List[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ a_ : Optional[Any] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def a_ ( __snake_case : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =_re_backend.findall(__snake_case ) if len(__snake_case ) == 0: return None return "_and_".join(__snake_case ) def a_ ( ) -> Optional[int]: """simple docstring""" with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase_ =0 lowerCamelCase_ ={} # Go through the end of the file while line_index < len(__snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase_ =find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 lowerCamelCase_ =[] # Until we unindent, add backend objects to the list while line_index < len(__snake_case ) and len(lines[line_index] ) > 1: lowerCamelCase_ =lines[line_index] lowerCamelCase_ =_re_single_line_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__snake_case ) > 0: lowerCamelCase_ =objects else: line_index += 1 return backend_specific_objects def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(__snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(__snake_case , __snake_case ) else: return DUMMY_CLASS.format(__snake_case , __snake_case ) def a_ ( __snake_case : Tuple=None ) -> List[str]: """simple docstring""" if backend_specific_objects is None: lowerCamelCase_ =read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase_ ={} for backend, objects in backend_specific_objects.items(): lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']''' lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] ) lowerCamelCase_ =dummy_file return dummy_files def a_ ( __snake_case : Dict=False ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase_ ={'''torch''': '''pt'''} # Locate actual dummy modules and read their 
content. lowerCamelCase_ =os.path.join(__snake_case , '''utils''' ) lowerCamelCase_ ={ backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase_ ={} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.read() else: lowerCamelCase_ ='''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main ''' '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` ''' '''to fix this.''' ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a_ : Tuple = parser.parse_args() check_dummies(args.fix_and_overwrite)
6
1
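# Example (not part of the corpus): what the backend-detection regex in the
# record above extracts from an `is_xxx_available()` guard line.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")

print(_re_backend.findall("if not is_torch_available():"))  # -> ['torch']
print(_re_backend.findall("if not (is_torch_available() and is_flax_available()):"))
# -> ['torch', 'flax'], which find_backend joins into 'torch_and_flax'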
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(""".""") def a_ ( __snake_case : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ =test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F'''{test_file} instead.''' ) lowerCamelCase_ =components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )] lowerCamelCase_ ='''.'''.join(__snake_case ) return test_module_path def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =get_module_path(__snake_case ) lowerCamelCase_ =importlib.import_module(__snake_case ) return test_module def a_ ( __snake_case : Dict ) -> Tuple: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(__snake_case , __snake_case ) ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): lowerCamelCase_ =getattr(__snake_case , __snake_case ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] ) if len(__snake_case ) > 0: test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =test_class() if hasattr(__snake_case , '''setUp''' ): test.setUp() lowerCamelCase_ =None if hasattr(__snake_case , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: lowerCamelCase_ =test.model_tester.__class__ return model_tester def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =[] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case ) lowerCamelCase_ =[] for test_class in test_classes: lowerCamelCase_ =get_model_tester_from_test_class(__snake_case ) if tester_class is not None: tester_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Tuple ) -> Tuple: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes} return test_tester_mapping def a_ ( __snake_case : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_test_mapping def a_ ( __snake_case : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_to_tester_mapping def a_ ( __snake_case : List[str] ) -> List[Any]: """simple docstring""" if isinstance(__snake_case , __snake_case ): return o elif isinstance(__snake_case , __snake_case ): return o.__name__ elif isinstance(__snake_case , (list, tuple) ): return [to_json(__snake_case ) for x in o] elif isinstance(__snake_case , __snake_case ): return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()} else: return o
6
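# Example (not part of the corpus): the test-file-to-module-path conversion
# implemented by the first helper above, as pure string manipulation.
import os

test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
components = test_file.split(os.path.sep)
components = components[:-1] + [components[-1].replace(".py", "")]
print(".".join(components))  # -> tests.models.bert.test_modeling_bert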
'''simple docstring''' a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def a_ ( __snake_case : int ) -> int: """simple docstring""" lowerCamelCase_ =0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution a_ : list[bool | None] = [None] * 10_00_00_00 a_ : List[Any] = True a_ : Optional[Any] = False def a_ ( __snake_case : int ) -> bool: """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase_ =chain(next_number(__snake_case ) ) lowerCamelCase_ =number_chain while number < 1000_0000: lowerCamelCase_ =number_chain number *= 10 return number_chain def a_ ( __snake_case : int = 1000_0000 ) -> int: """simple docstring""" for i in range(1 , __snake_case ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution() = }""")
6
1
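# Example (not part of the corpus): the digit-square-sum step that drives
# the chains in the record above (Project Euler 92).
def next_number(number: int) -> int:
    return sum(int(digit) ** 2 for digit in str(number))

print(next_number(44))  # -> 32; the chain 44 -> 32 -> 13 -> 10 -> 1 ends in 1
print(next_number(85))  # -> 89; chains that reach 89 cycle forever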
'''simple docstring''' import math import sys import cva import numpy as np def a_ ( __snake_case : np.ndarray , __snake_case : float ) -> np.ndarray: """simple docstring""" # For applying gaussian function for each element in matrix. lowerCamelCase_ =math.sqrt(__snake_case ) lowerCamelCase_ =1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def a_ ( __snake_case : np.ndarray , __snake_case : int , __snake_case : int , __snake_case : int ) -> np.ndarray: """simple docstring""" lowerCamelCase_ =kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def a_ ( __snake_case : int , __snake_case : float ) -> np.ndarray: """simple docstring""" # Creates a gaussian kernel of given dimension. lowerCamelCase_ =np.zeros((kernel_size, kernel_size) ) for i in range(0 , __snake_case ): for j in range(0 , __snake_case ): lowerCamelCase_ =math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(__snake_case , __snake_case ) def a_ ( __snake_case : np.ndarray , __snake_case : float , __snake_case : float , __snake_case : int , ) -> np.ndarray: """simple docstring""" lowerCamelCase_ =np.zeros(img.shape ) lowerCamelCase_ =get_gauss_kernel(__snake_case , __snake_case ) lowerCamelCase_, lowerCamelCase_ =img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): lowerCamelCase_ =get_slice(__snake_case , __snake_case , __snake_case , __snake_case ) lowerCamelCase_ =img_s - img_s[kernel_size // 2, kernel_size // 2] lowerCamelCase_ =vec_gaussian(__snake_case , __snake_case ) lowerCamelCase_ =np.multiply(__snake_case , __snake_case ) lowerCamelCase_ =np.multiply(__snake_case , __snake_case ) lowerCamelCase_ =np.sum(__snake_case ) / np.sum(__snake_case ) lowerCamelCase_ =val return imga def a_ ( __snake_case : list ) -> tuple: """simple docstring""" lowerCamelCase_ =args[1] if args[1:] else '''../image_data/lena.jpg''' lowerCamelCase_ =float(args[2] ) if args[2:] else 1.0 lowerCamelCase_ =float(args[3] ) if args[3:] else 1.0 if args[4:]: lowerCamelCase_ =int(args[4] ) lowerCamelCase_ =kernel_size + abs(kernel_size % 2 - 1 ) else: lowerCamelCase_ =5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": a_ , a_ , a_ , a_ : Union[str, Any] = parse_args(sys.argv) a_ : Dict = cva.imread(filename, 0) cva.imshow("""input image""", img) a_ : Tuple = img / 2_55 a_ : Optional[Any] = out.astype("""float32""") a_ : List[str] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) a_ : Union[str, Any] = out * 2_55 a_ : Union[str, Any] = np.uinta(out) cva.imshow("""output image""", out) cva.waitKey(0) cva.destroyAllWindows()
6
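# Example (not part of the corpus): the vectorised Gaussian at the heart of
# the bilateral filter above, extracted as a standalone function.
import math
import numpy as np

def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)

print(vec_gaussian(np.array([0.0, 1.0, 2.0]), 1.0))
# peaks near 0.399 at zero and decays with distance, as a Gaussian should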
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def a_ ( __snake_case : Tuple ) -> str: """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class __UpperCamelCase ( lowerCamelCase__ ): @staticmethod def lowercase__ ( lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =parser.add_parser('''download''' ) download_parser.add_argument( '''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' ) download_parser.add_argument( '''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' ) download_parser.add_argument( '''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', ) download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' ) download_parser.set_defaults(func=lowerCAmelCase ) def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =model lowerCamelCase_ =cache lowerCamelCase_ =force lowerCamelCase_ =trust_remote_code def lowercase__ ( self ): """simple docstring""" from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
6
1
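# Example (not part of the corpus): what the download command above boils
# down to in plain Python; the model name and cache path are illustrative.
from transformers import AutoModel, AutoTokenizer

AutoModel.from_pretrained("bert-base-uncased", cache_dir="./models", force_download=False)
AutoTokenizer.from_pretrained("bert-base-uncased", cache_dir="./models", force_download=False)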
'''simple docstring''' import heapq as hq import math from collections.abc import Iterator class __UpperCamelCase : def __init__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =str(id_ ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =[] lowerCamelCase_ ={} # {vertex:distance} def __lt__( self, lowerCAmelCase ): """simple docstring""" return self.key < other.key def __repr__( self ): """simple docstring""" return self.id def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.neighbors.append(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =weight def a_ ( __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : int ) -> str: """simple docstring""" # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , __snake_case ) graph[b - 1].add_edge(graph[a - 1] , __snake_case ) def a_ ( __snake_case : list , __snake_case : Vertex ) -> list: """simple docstring""" lowerCamelCase_ =[] for u in graph: lowerCamelCase_ =math.inf lowerCamelCase_ =None lowerCamelCase_ =0 lowerCamelCase_ =graph[:] while q: lowerCamelCase_ =min(__snake_case ) q.remove(__snake_case ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): lowerCamelCase_ =u lowerCamelCase_ =u.edges[v.id] for i in range(1 , len(__snake_case ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def a_ ( __snake_case : list , __snake_case : Vertex ) -> Iterator[tuple]: """simple docstring""" for u in graph: lowerCamelCase_ =math.inf lowerCamelCase_ =None lowerCamelCase_ =0 lowerCamelCase_ =list(__snake_case ) hq.heapify(__snake_case ) while h: lowerCamelCase_ =hq.heappop(__snake_case ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): lowerCamelCase_ =u lowerCamelCase_ =u.edges[v.id] hq.heapify(__snake_case ) for i in range(1 , len(__snake_case ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def a_ ( ) -> None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
6
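# Example (not part of the corpus): a self-contained heap-based Prim loop on
# a tiny adjacency dict; names and graph are illustrative, not the record's
# own API.
import heapq

def prim_mst(adj: dict, start) -> list:
    visited = {start}
    edges = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(edges)
    mst = []
    while edges:
        w, u, v = heapq.heappop(edges)
        if v in visited:
            continue  # stale entry: v was reached by a cheaper edge already
        visited.add(v)
        mst.append((u, v, w))
        for nxt, nw in adj[v]:
            if nxt not in visited:
                heapq.heappush(edges, (nw, v, nxt))
    return mst

graph = {1: [(2, 1), (3, 3)], 2: [(1, 1), (3, 1)], 3: [(1, 3), (2, 1)]}
print(prim_mst(graph, 1))  # -> [(1, 2, 1), (2, 3, 1)]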
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features a_ : List[str] = logging.get_logger(__name__) a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} ) lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowercase : int =field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowercase : int =field( default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) lowercase : int =field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) lowercase : int =field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ) } , ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowercase : float =field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='train' lowercase : Any ='dev' class __UpperCamelCase ( lowerCamelCase__ ): lowercase : SquadDataTrainingArguments lowercase : List[SquadFeatures] lowercase : Split lowercase : bool def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ): """simple docstring""" lowerCamelCase_ =args lowerCamelCase_ =is_language_sensitive lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowerCAmelCase, lowerCAmelCase ): try: lowerCamelCase_ =Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) lowerCamelCase_ =mode # Load data features from cache or dataset file lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1''' lowerCamelCase_ =os.path.join( 
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase_ =cached_features_file + '''.lock''' with FileLock(lowerCAmelCase ): if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache: lowerCamelCase_ =time.time() lowerCamelCase_ =torch.load(lowerCAmelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase_ =self.old_features['''features'''] lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase ) lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ''' future run''' ) else: if mode == Split.dev: lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase_ =self.processor.get_train_examples(args.data_dir ) lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features( examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, ) lowerCamelCase_ =time.time() torch.save( {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.features[i] lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float ) lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': attention_mask, '''token_type_ids''': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} ) if self.args.version_2_with_negative: inputs.update({'''is_impossible''': is_impossible} ) if self.is_language_sensitive: inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} ) return inputs
6
1
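# Example (not part of the corpus): the lock-then-cache pattern used above,
# reduced to its essentials; file names are illustrative and the `filelock`
# package is assumed installed.
import os
import torch
from filelock import FileLock

cache_file = "cached_features.pt"
with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        features = torch.load(cache_file)
    else:
        features = {"features": list(range(3))}  # stand-in for slow preprocessing
        torch.save(features, cache_file)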
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer a_ : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : List[str] = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } a_ : List[Any] = { """unc-nlp/lxmert-base-uncased""": 5_12, } a_ : str = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =VOCAB_FILES_NAMES lowercase : int =PRETRAINED_VOCAB_FILES_MAP lowercase : int =PRETRAINED_INIT_CONFIGURATION lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : int =LxmertTokenizer def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase="[UNK]", lowerCAmelCase="[SEP]", lowerCAmelCase="[PAD]", lowerCAmelCase="[CLS]", lowerCAmelCase="[MASK]", lowerCAmelCase=True, lowerCAmelCase=None, **lowerCAmelCase, ): """simple docstring""" super().__init__( lowerCAmelCase, tokenizer_file=lowerCAmelCase, do_lower_case=lowerCAmelCase, unk_token=lowerCAmelCase, sep_token=lowerCAmelCase, pad_token=lowerCAmelCase, cls_token=lowerCAmelCase, mask_token=lowerCAmelCase, tokenize_chinese_chars=lowerCAmelCase, strip_accents=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''', lowerCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''', lowerCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''', lowerCAmelCase ) != tokenize_chinese_chars ): lowerCamelCase_ =getattr(lowerCAmelCase, normalizer_state.pop('''type''' ) ) lowerCamelCase_ =do_lower_case lowerCamelCase_ =strip_accents lowerCamelCase_ =tokenize_chinese_chars lowerCamelCase_ =normalizer_class(**lowerCAmelCase ) lowerCamelCase_ =do_lower_case def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =self._tokenizer.model.save(lowerCAmelCase, name=lowerCAmelCase ) return tuple(lowerCAmelCase )
6
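# Example (not part of the corpus): the special-token layout the tokenizer
# methods above produce, spelled out with BERT-style ids (101 = [CLS],
# 102 = [SEP]); the token ids themselves are placeholders.
cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8], [9]
pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
print(pair)            # -> [101, 7, 8, 102, 9, 102]
print(token_type_ids)  # -> [0, 0, 0, 0, 1, 1]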
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : Any = logging.get_logger(__name__) a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/""" a_ : Any = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def a_ ( __snake_case : int ) -> Any: """simple docstring""" if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowerCamelCase_ =key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ ={} import re lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case ) elif re_encoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case ) elif re_encoder_block_proj_out.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ 
=re_decoder_block_conv_out.sub(__snake_case , __snake_case ) elif re_decoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case ) elif re_decoder_block_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case ) elif re_prior_cond_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case ) elif re_prior_cond_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case ) # keep original key else: lowerCamelCase_ =original_key lowerCamelCase_ =replace_key(__snake_case ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) lowerCamelCase_ =original_key lowerCamelCase_ =original_key lowerCamelCase_ =value return new_dict @torch.no_grad() def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case ) open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content ) lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]] lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case ) lowerCamelCase_ =JukeboxModel(__snake_case ) lowerCamelCase_ =[] lowerCamelCase_ ={} for i, dict_name 
in enumerate(__snake_case ): lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model'''] lowerCamelCase_ ={} for k in old_dic.keys(): if k.endswith('''.b''' ): lowerCamelCase_ =old_dic[k] elif k.endswith('''.w''' ): lowerCamelCase_ =old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowerCamelCase_ =old_dic[k] else: lowerCamelCase_ =old_dic[k] lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}''' lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case ) weight_dict.append(__snake_case ) lowerCamelCase_ =weight_dict.pop(0 ) model.vqvae.load_state_dict(__snake_case ) for i in range(len(__snake_case ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile: json.dump(__snake_case , __snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) return weight_dict if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) a_ : Optional[int] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
6
1
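# Example (not part of the corpus): one of the regex-driven key renames
# above in miniature: match a checkpoint key, pull out the indices, and
# rebuild the new name.
import re

re_encoder_block_conv_in = re.compile(
    r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)"
)
key = "encoders.0.level_blocks.1.model.2.3.weight"
groups = re_encoder_block_conv_in.match(key).groups()
block_index = int(groups[2]) * 2 + int(groups[3])
print(f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}")
# -> encoders.0.level_blocks.1.downsample_block.7.weight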
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ : Union[str, Any] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys a_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
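# Example (not part of the corpus): the idea behind the `_LazyModule`
# indirection above, in miniature; the real class is considerably more
# elaborate, and "json" is just an illustrative target module.
import importlib

class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None
    def __getattr__(self, attr):
        # The real import only happens on first attribute access.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json_lazy = LazyModule("json")
print(json_lazy.dumps({"lazy": True}))  # triggers the import here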
'''simple docstring''' def a_ ( __snake_case : int = 1000 ) -> int: """Return the index of the first Fibonacci number with __snake_case digits.""" f_prev, f_curr = 1, 1 index = 2 while True: f_prev, f_curr = f_curr, f_prev + f_curr index += 1 if len(str(f_curr)) >= __snake_case: break return index if __name__ == "__main__": print(a_(int(str(input()).strip())))
6
1
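# Example (not part of the corpus): a quick sanity check of the digit-count
# recurrence fixed in the record above; the first Fibonacci number with
# three digits is F(12) = 144.
f_prev, f_curr, index = 1, 1, 2
while len(str(f_curr)) < 3:
    f_prev, f_curr = f_curr, f_prev + f_curr
    index += 1
print(index, f_curr)  # -> 12 144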
'''simple docstring''' def a_ ( __snake_case : int ) -> Any: """simple docstring""" # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =max(__snake_case ) lowerCamelCase_ =min(__snake_case ) # create the counting array lowerCamelCase_ =coll_max + 1 - coll_min lowerCamelCase_ =[0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , __snake_case ): lowerCamelCase_ =counting_arr[i] + counting_arr[i - 1] # create the output collection lowerCamelCase_ =[0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , __snake_case ) ): lowerCamelCase_ =collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return "".join([chr(__snake_case ) for i in counting_sort([ord(__snake_case ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt" a_ : List[Any] = input("""Enter numbers separated by a comma:\n""").strip() a_ : Union[str, Any] = [int(item) for item in user_input.split(""",""")] print(counting_sort(unsorted))
6
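# Example (not part of the corpus): a compact standalone restatement of the
# stable counting sort above, with a small demonstration input.
def counting_sort(collection: list) -> list:
    if not collection:
        return []
    lo, hi = min(collection), max(collection)
    counts = [0] * (hi - lo + 1)
    for number in collection:
        counts[number - lo] += 1
    for i in range(1, len(counts)):      # prefix sums: counts[i] = how many <= i + lo
        counts[i] += counts[i - 1]
    ordered = [0] * len(collection)
    for number in reversed(collection):  # back-to-front keeps the sort stable
        counts[number - lo] -= 1
        ordered[counts[number - lo]] = number
    return ordered

print(counting_sort([0, 5, 3, 2, 2]))  # -> [0, 2, 2, 3, 5]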
'''simple docstring''' import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(""".""") def a_ ( __snake_case : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ =test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F'''{test_file} instead.''' ) lowerCamelCase_ =components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )] lowerCamelCase_ ='''.'''.join(__snake_case ) return test_module_path def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =get_module_path(__snake_case ) lowerCamelCase_ =importlib.import_module(__snake_case ) return test_module def a_ ( __snake_case : Dict ) -> Tuple: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(__snake_case , __snake_case ) ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): lowerCamelCase_ =getattr(__snake_case , __snake_case ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] ) if len(__snake_case ) > 0: test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =test_class() if hasattr(__snake_case , '''setUp''' ): test.setUp() lowerCamelCase_ =None if hasattr(__snake_case , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: lowerCamelCase_ =test.model_tester.__class__ return model_tester def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =[] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case ) lowerCamelCase_ =[] for test_class in test_classes: lowerCamelCase_ =get_model_tester_from_test_class(__snake_case ) if tester_class is not None: tester_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Tuple ) -> Tuple: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes} return test_tester_mapping def a_ ( __snake_case : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_test_mapping def a_ ( __snake_case : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_to_tester_mapping def a_ ( __snake_case : List[str] ) -> List[Any]: """simple docstring""" if isinstance(__snake_case , __snake_case ): return o elif isinstance(__snake_case , __snake_case ): return o.__name__ elif isinstance(__snake_case , (list, tuple) ): return [to_json(__snake_case ) for x in o] elif isinstance(__snake_case , __snake_case ): return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()} else: return o
6
1
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants a_ : Union[str, Any] = Mapping[str, np.ndarray] a_ : Optional[Any] = Mapping[str, Any] # Is a nested dict. a_ : Dict = 0.01 @dataclasses.dataclass(frozen=lowerCamelCase__ ) class __UpperCamelCase : lowercase : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowercase : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowercase : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowercase : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowercase : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowercase : Optional[np.ndarray] =None # Optional remark about the protein. Included as a comment in output PDB # files lowercase : Optional[str] =None # Templates used to generate this protein (prediction-only) lowercase : Optional[Sequence[str]] =None # Chain corresponding to each parent lowercase : Optional[Sequence[int]] =None def a_ ( __snake_case : str ) -> Protein: """simple docstring""" lowerCamelCase_ =r'''(\[[A-Z]+\]\n)''' lowerCamelCase_ =[tag.strip() for tag in re.split(__snake_case , __snake_case ) if len(__snake_case ) > 0] lowerCamelCase_ =zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowerCamelCase_ =["N", "CA", "C"] lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None for g in groups: if "[PRIMARY]" == g[0]: lowerCamelCase_ =g[1][0].strip() for i in range(len(__snake_case ) ): if seq[i] not in residue_constants.restypes: lowerCamelCase_ ='''X''' # FIXME: strings are immutable lowerCamelCase_ =np.array( [residue_constants.restype_order.get(__snake_case , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowerCamelCase_ =[] for axis in range(3 ): tertiary.append(list(map(__snake_case , g[1][axis].split() ) ) ) lowerCamelCase_ =np.array(__snake_case ) lowerCamelCase_ =np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(__snake_case ): lowerCamelCase_ =np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowerCamelCase_ =np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowerCamelCase_ =np.zeros( ( len(__snake_case ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(__snake_case ): lowerCamelCase_ =1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=__snake_case , atom_mask=__snake_case , aatype=__snake_case , residue_index=np.arange(len(__snake_case ) ) , b_factors=__snake_case , ) def a_ ( __snake_case : Protein , __snake_case : int = 0 ) -> List[str]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =prot.remark if remark is not None: pdb_headers.append(F'''REMARK {remark}''' ) lowerCamelCase_ =prot.parents lowerCamelCase_ =prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowerCamelCase_ =[p for i, p in zip(__snake_case , __snake_case ) if i == 
chain_id] if parents is None or len(__snake_case ) == 0: lowerCamelCase_ =['''N/A'''] pdb_headers.append(F'''PARENT {' '.join(__snake_case )}''' ) return pdb_headers def a_ ( __snake_case : Protein , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =pdb_str.split('''\n''' ) lowerCamelCase_ =prot.remark if remark is not None: out_pdb_lines.append(F'''REMARK {remark}''' ) lowerCamelCase_ =42 if prot.parents is not None and len(prot.parents ) > 0: lowerCamelCase_ =[] if prot.parents_chain_index is not None: lowerCamelCase_ ={} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(__snake_case ) , [] ) parent_dict[str(__snake_case )].append(__snake_case ) lowerCamelCase_ =max([int(__snake_case ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowerCamelCase_ =parent_dict.get(str(__snake_case ) , ['''N/A'''] ) parents_per_chain.append(__snake_case ) else: parents_per_chain.append(list(prot.parents ) ) else: lowerCamelCase_ =[['''N/A''']] def make_parent_line(__snake_case : Sequence[str] ) -> str: return F'''PARENT {' '.join(__snake_case )}''' out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowerCamelCase_ =0 for i, l in enumerate(__snake_case ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(__snake_case ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(__snake_case ): lowerCamelCase_ =parents_per_chain[chain_counter] else: lowerCamelCase_ =['''N/A'''] out_pdb_lines.append(make_parent_line(__snake_case ) ) return "\n".join(__snake_case ) def a_ ( __snake_case : Protein ) -> str: """simple docstring""" lowerCamelCase_ =residue_constants.restypes + ['''X'''] def res_atoa(__snake_case : int ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowerCamelCase_ =residue_constants.atom_types lowerCamelCase_ =[] lowerCamelCase_ =prot.atom_mask lowerCamelCase_ =prot.aatype lowerCamelCase_ =prot.atom_positions lowerCamelCase_ =prot.residue_index.astype(np.intaa ) lowerCamelCase_ =prot.b_factors lowerCamelCase_ =prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowerCamelCase_ =get_pdb_headers(__snake_case ) if len(__snake_case ) > 0: pdb_lines.extend(__snake_case ) lowerCamelCase_ =aatype.shape[0] lowerCamelCase_ =1 lowerCamelCase_ =0 lowerCamelCase_ =string.ascii_uppercase lowerCamelCase_ =None # Add all atom sites. for i in range(__snake_case ): lowerCamelCase_ =res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(__snake_case , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowerCamelCase_ ='''ATOM''' lowerCamelCase_ =atom_name if len(__snake_case ) == 4 else F''' {atom_name}''' lowerCamelCase_ ='''''' lowerCamelCase_ ='''''' lowerCamelCase_ =1.0_0 lowerCamelCase_ =atom_name[0] # Protein supports only C, N, O, S, this works. lowerCamelCase_ ='''''' lowerCamelCase_ ='''A''' if chain_index is not None: lowerCamelCase_ =chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
lowerCamelCase_ =( F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}''' F'''{res_name_a:>3} {chain_tag:>1}''' F'''{residue_index[i]:>4}{insertion_code:>1} ''' F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}''' F'''{occupancy:>6.2f}{b_factor:>6.2f} ''' F'''{element:>2}{charge:>2}''' ) pdb_lines.append(__snake_case ) atom_index += 1 lowerCamelCase_ =i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowerCamelCase_ =True lowerCamelCase_ =chain_index[i + 1] if should_terminate: # Close the chain. lowerCamelCase_ ='''TER''' lowerCamelCase_ =( F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}''' ) pdb_lines.append(__snake_case ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(__snake_case , __snake_case ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(__snake_case ) def a_ ( __snake_case : Protein ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def a_ ( __snake_case : FeatureDict , __snake_case : ModelOutput , __snake_case : Optional[np.ndarray] = None , __snake_case : Optional[np.ndarray] = None , __snake_case : Optional[str] = None , __snake_case : Optional[Sequence[str]] = None , __snake_case : Optional[Sequence[int]] = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=__snake_case , remark=__snake_case , parents=__snake_case , parents_chain_index=__snake_case , )
6
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
    lowercase : str =['speech']

    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        requires_backends(self, ['''speech'''] )


class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
    lowercase : Any =['speech']

    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        requires_backends(self, ['''speech'''] )
6
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=3, lowerCAmelCase=18, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], lowerCAmelCase=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], lowerCAmelCase=True, ): """simple docstring""" lowerCamelCase_ =size if size is not None else {'''height''': 224, '''width''': 224} lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =image_size lowerCamelCase_ =min_resolution lowerCamelCase_ =max_resolution lowerCamelCase_ =do_resize lowerCamelCase_ =size lowerCamelCase_ =do_center_crop lowerCamelCase_ =crop_size lowerCamelCase_ =do_normalize lowerCamelCase_ =image_mean lowerCamelCase_ =image_std lowerCamelCase_ =do_convert_rgb def lowercase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def lowercase__ ( self, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False ): """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowerCamelCase_ =[] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uinta ) ) else: lowerCamelCase_ =[] for i in range(self.batch_size ): lowerCamelCase_, lowerCamelCase_ =np.random.choice(np.arange(self.min_resolution, self.max_resolution ), 2 ) image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowerCamelCase_ =[Image.fromarray(np.moveaxis(lowerCAmelCase, 0, -1 ) ) for x in image_inputs] if torchify: lowerCamelCase_ =[torch.from_numpy(lowerCAmelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Tuple =ChineseCLIPImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ChineseCLIPImageProcessingTester(self, do_center_crop=lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase, 
'''center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_convert_rgb''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''height''': 224, '''width''': 224} ) self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} ) lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 ) self.assertEqual(image_processor.size, {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, Image.Image ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, np.ndarray ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, torch.Tensor ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched lowerCamelCase_ 
=image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) @require_torch @require_vision class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Tuple =ChineseCLIPImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=lowerCAmelCase ) lowerCamelCase_ =3 @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_convert_rgb''' ) ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =self.image_processor_tester.prepare_inputs(equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, Image.Image ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), )
6
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class __UpperCamelCase ( lowerCamelCase__ ):
    lowercase : List[str] =['image_processor', 'tokenizer']
    lowercase : Optional[int] ='AutoImageProcessor'
    lowercase : List[str] ='AutoTokenizer'

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                lowerCAmelCase,
            )
            lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
        lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =self.image_processor
        lowerCamelCase_ =False

    def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
        lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
        if len(lowerCAmelCase ) > 0:
            lowerCamelCase_ =args[0]
            lowerCamelCase_ =args[1:]
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
        if text is not None:
            lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            lowerCamelCase_ =encodings['''input_ids''']
            return inputs

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )

    @contextmanager
    def lowercase__ ( self ):
        """simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.'''
        )
        lowerCamelCase_ =True
        lowerCamelCase_ =self.tokenizer
        yield
        lowerCamelCase_ =self.image_processor
        lowerCamelCase_ =False

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
        """simple docstring"""
        if added_vocab is None:
            lowerCamelCase_ =self.tokenizer.get_added_vocab()
        lowerCamelCase_ ={}
        while tokens:
            lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
            if start_token is None:
                break
            lowerCamelCase_ =start_token.group(1 )
            lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
            lowerCamelCase_ =start_token.group()
            if end_token is None:
                lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
            else:
                lowerCamelCase_ =end_token.group()
                lowerCamelCase_ =re.escape(lowerCAmelCase )
                lowerCamelCase_ =re.escape(lowerCAmelCase )
                lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
                if content is not None:
                    lowerCamelCase_ =content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
                        if value:
                            if len(lowerCAmelCase ) == 1:
                                lowerCamelCase_ =value[0]
                            lowerCamelCase_ =value
                    else:  # leaf nodes
                        lowerCamelCase_ =[]
                        for leaf in content.split(R'''<sep/>''' ):
                            lowerCamelCase_ =leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                lowerCamelCase_ =leaf[1:-2]  # for categorical special tokens
                            output[key].append(lowerCAmelCase )
                        if len(output[key] ) == 1:
                            lowerCamelCase_ =output[key][0]
                lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
        if len(lowerCAmelCase ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            lowerCAmelCase,
        )
        return self.image_processor_class

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            lowerCAmelCase,
        )
        return self.image_processor
6
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

a_ : List[str] = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

a_ : Optional[int] = TaTokenizerFast

a_ : int = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Tuple = [
        """MT5EncoderModel""",
        """MT5ForConditionalGeneration""",
        """MT5ForQuestionAnswering""",
        """MT5Model""",
        """MT5PreTrainedModel""",
        """MT5Stack""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Dict = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Union[str, Any] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    a_ : Union[str, Any] = _LazyModule(
        __name__,
        globals()["""__file__"""],
        _import_structure,
        extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
        module_spec=__spec__,
    )
6
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =ShapEImgaImgPipeline lowercase : Dict =['image'] lowercase : str =['image'] lowercase : int =[ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] lowercase : int =False @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self ): """simple docstring""" return 8 @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, ) lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =CLIPImageProcessor( crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, ) return image_processor @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowerCamelCase_ =PriorTransformer(**lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowerCamelCase_ =ShapERenderer(**lowerCAmelCase ) return model def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.dummy_prior lowerCamelCase_ =self.dummy_image_encoder lowerCamelCase_ =self.dummy_image_processor lowerCamelCase_ =self.dummy_renderer lowerCamelCase_ =HeunDiscreteScheduler( beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, ) lowerCamelCase_ ={ '''prior''': prior, '''image_encoder''': image_encoder, 
'''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) lowerCamelCase_ =output.images[0] lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowerCamelCase_ =np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase__ ( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =torch_device == '''cpu''' lowerCamelCase_ =True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =1 lowerCamelCase_ =2 lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) for key in inputs.keys(): if key in self.batch_params: lowerCamelCase_ =batch_size * [inputs[key]] lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowerCamelCase_ =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) lowerCamelCase_ =pipe( lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
6
1
'''simple docstring'''
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class __UpperCamelCase ( unittest.TestCase ):
    def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=4, ):
        """simple docstring"""
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =seq_length
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_attention_mask
        lowerCamelCase_ =use_token_type_ids
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =type_sequence_label_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =num_choices

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        lowerCamelCase_ =None
        if self.use_attention_mask:
            lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ =None
        if self.use_token_type_ids:
            lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        lowerCamelCase_ =RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=lowerCAmelCase,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =self.prepare_config_and_inputs()
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
        lowerCamelCase_ ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =self.prepare_config_and_inputs()
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
        lowerCamelCase_ =True
        lowerCamelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    lowercase : Tuple =True
    lowercase : List[Any] =(
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =FlaxRobertaModelTester(self )

    @slow
    def lowercase__ ( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            lowerCamelCase_ =model_class_name.from_pretrained('''roberta-base''', from_pt=lowerCAmelCase )
            lowerCamelCase_ =model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowerCAmelCase )
6
'''simple docstring''' from itertools import product def a_ ( __snake_case : int , __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =sides_number lowerCamelCase_ =max_face_number * dice_number lowerCamelCase_ =[0] * (max_total + 1) lowerCamelCase_ =1 lowerCamelCase_ =range(__snake_case , max_face_number + 1 ) for dice_numbers in product(__snake_case , repeat=__snake_case ): lowerCamelCase_ =sum(__snake_case ) totals_frequencies[total] += 1 return totals_frequencies def a_ ( ) -> float: """simple docstring""" lowerCamelCase_ =total_frequency_distribution( sides_number=4 , dice_number=9 ) lowerCamelCase_ =total_frequency_distribution( sides_number=6 , dice_number=6 ) lowerCamelCase_ =0 lowerCamelCase_ =9 lowerCamelCase_ =4 * 9 lowerCamelCase_ =6 for peter_total in range(__snake_case , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) lowerCamelCase_ =(4**9) * (6**6) lowerCamelCase_ =peter_wins_count / total_games_number lowerCamelCase_ =round(__snake_case , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
6
1
'''simple docstring'''
def a_ ( __snake_case : list[int] , __snake_case : str ) -> list[int]:
    """simple docstring"""
    lowerCamelCase_ =int(__snake_case )
    # Initialize Result
    lowerCamelCase_ =[]
    # Traverse through all denomination
    for denomination in reversed(__snake_case ):
        # Find denominations
        while int(__snake_case ) >= int(__snake_case ):
            total_value -= int(__snake_case )
            answer.append(__snake_case )
    # Append the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    a_ : Optional[Any] = []
    a_ : List[str] = """0"""
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        a_ : int = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        a_ : Dict = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        a_ : int = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        a_ : List[Any] = input("""Enter the change you want to make: """).strip()

    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F"""Following is minimal change for {value}: """)
        a_ : str = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
6
'''simple docstring''' import os from typing import Dict, List, Tuple, TypeVar, Union a_ : Tuple = TypeVar("""T""") a_ : Dict = Union[List[T], Tuple[T, ...]] a_ : int = Union[T, List[T], Dict[str, T]] a_ : Optional[Any] = Union[str, bytes, os.PathLike]
6
1
'''simple docstring'''
import random


def a_ ( __snake_case : int ) -> bool:
    """simple docstring"""
    lowerCamelCase_ =num - 1
    lowerCamelCase_ =0
    while s % 2 == 0:
        lowerCamelCase_ =s // 2
        t += 1
    for _ in range(5 ):
        lowerCamelCase_ =random.randrange(2 , num - 1 )
        lowerCamelCase_ =pow(__snake_case , __snake_case , __snake_case )
        if v != 1:
            lowerCamelCase_ =0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    lowerCamelCase_ =i + 1
                    lowerCamelCase_ =(v**2) % num
    return True


def a_ ( __snake_case : int ) -> bool:
    """simple docstring"""
    if num < 2:
        return False
    lowerCamelCase_ =[
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(__snake_case )


def a_ ( __snake_case : int = 1024 ) -> int:
    """simple docstring"""
    while True:
        lowerCamelCase_ =random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(__snake_case ):
            return num


if __name__ == "__main__":
    a_ : Optional[int] = generate_large_prime()
    print(("""Prime number:""", num))
    print(("""is_prime_low_num:""", is_prime_low_num(num)))
6
'''simple docstring''' import math import random from typing import Any from .hill_climbing import SearchProblem def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any: """simple docstring""" lowerCamelCase_ =False lowerCamelCase_ =search_prob lowerCamelCase_ =start_temperate lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =None while not search_end: lowerCamelCase_ =current_state.score() if best_state is None or current_score > best_state.score(): lowerCamelCase_ =current_state scores.append(__snake_case ) iterations += 1 lowerCamelCase_ =None lowerCamelCase_ =current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor lowerCamelCase_ =neighbors.pop(__snake_case ) lowerCamelCase_ =picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: lowerCamelCase_ =change * -1 # in case we are finding minimum if change > 0: # improves the solution lowerCamelCase_ =picked_neighbor else: lowerCamelCase_ =(math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability lowerCamelCase_ =picked_neighbor lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor lowerCamelCase_ =True else: lowerCamelCase_ =next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(__snake_case ) , __snake_case ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) a_ : Optional[int] = simulated_annealing( prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) a_ : List[str] = simulated_annealing( prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return (3 * x**2) - (6 * y) a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True) print( """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F"""{local_min.score()}""" ) a_ : Dict = SearchProblem(x=3, y=4, step_size=1, 
function_to_optimize=test_fa) a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True) print( """The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F"""{local_min.score()}""" )
6
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ : Tuple = logging.get_logger(__name__) def a_ ( __snake_case : List[str] ) -> str: """simple docstring""" lowerCamelCase_ =MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowerCamelCase_ =[144, 192, 240] lowerCamelCase_ =[16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowerCamelCase_ =[96, 120, 144] lowerCamelCase_ =[16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowerCamelCase_ =[64, 80, 96] lowerCamelCase_ =[16, 16, 24, 48, 64, 80, 320] lowerCamelCase_ =0.0_5 lowerCamelCase_ =2.0 if mobilevit_name.startswith('''deeplabv3_''' ): lowerCamelCase_ =512 lowerCamelCase_ =16 lowerCamelCase_ =21 lowerCamelCase_ ='''pascal-voc-id2label.json''' else: lowerCamelCase_ =1000 lowerCamelCase_ ='''imagenet-1k-id2label.json''' lowerCamelCase_ ='''huggingface/label-files''' lowerCamelCase_ =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) ) lowerCamelCase_ ={int(__snake_case ): v for k, v in idalabel.items()} lowerCamelCase_ =idalabel lowerCamelCase_ ={v: k for k, v in idalabel.items()} return config def a_ ( __snake_case : Dict , __snake_case : Dict=False ) -> Tuple: """simple docstring""" for i in range(1 , 6 ): if F'''layer_{i}.''' in name: lowerCamelCase_ =name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: lowerCamelCase_ =name.replace('''conv_1.''' , '''conv_stem.''' ) if ".block." in name: lowerCamelCase_ =name.replace('''.block.''' , '''.''' ) if "exp_1x1" in name: lowerCamelCase_ =name.replace('''exp_1x1''' , '''expand_1x1''' ) if "red_1x1" in name: lowerCamelCase_ =name.replace('''red_1x1''' , '''reduce_1x1''' ) if ".local_rep.conv_3x3." in name: lowerCamelCase_ =name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' ) if ".local_rep.conv_1x1." in name: lowerCamelCase_ =name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' ) if ".norm." in name: lowerCamelCase_ =name.replace('''.norm.''' , '''.normalization.''' ) if ".conv." in name: lowerCamelCase_ =name.replace('''.conv.''' , '''.convolution.''' ) if ".conv_proj." in name: lowerCamelCase_ =name.replace('''.conv_proj.''' , '''.conv_projection.''' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: lowerCamelCase_ =name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: lowerCamelCase_ =name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' ) if "expand_1x1" in name: lowerCamelCase_ =name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' ) if "conv_3x3" in name: lowerCamelCase_ =name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' ) if "reduce_1x1" in name: lowerCamelCase_ =name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' ) for i in range(2 , 5 ): if F'''.global_rep.{i}.weight''' in name: lowerCamelCase_ =name.replace(F'''.global_rep.{i}.weight''' , '''.layernorm.weight''' ) if F'''.global_rep.{i}.bias''' in name: lowerCamelCase_ =name.replace(F'''.global_rep.{i}.bias''' , '''.layernorm.bias''' ) if ".global_rep." 
in name: lowerCamelCase_ =name.replace('''.global_rep.''' , '''.transformer.''' ) if ".pre_norm_mha.0." in name: lowerCamelCase_ =name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' ) if ".pre_norm_mha.1.out_proj." in name: lowerCamelCase_ =name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' ) if ".pre_norm_ffn.0." in name: lowerCamelCase_ =name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' ) if ".pre_norm_ffn.1." in name: lowerCamelCase_ =name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' ) if ".pre_norm_ffn.4." in name: lowerCamelCase_ =name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' ) if ".transformer." in name: lowerCamelCase_ =name.replace('''.transformer.''' , '''.transformer.layer.''' ) if ".aspp_layer." in name: lowerCamelCase_ =name.replace('''.aspp_layer.''' , '''.''' ) if ".aspp_pool." in name: lowerCamelCase_ =name.replace('''.aspp_pool.''' , '''.''' ) if "seg_head." in name: lowerCamelCase_ =name.replace('''seg_head.''' , '''segmentation_head.''' ) if "segmentation_head.classifier.classifier." in name: lowerCamelCase_ =name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' ) if "classifier.fc." in name: lowerCamelCase_ =name.replace('''classifier.fc.''' , '''classifier.''' ) elif (not base_model) and ("segmentation_head." not in name): lowerCamelCase_ ='''mobilevit.''' + name return name def a_ ( __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : List[Any]=False ) -> Union[str, Any]: """simple docstring""" if base_model: lowerCamelCase_ ='''''' else: lowerCamelCase_ ='''mobilevit.''' for key in orig_state_dict.copy().keys(): lowerCamelCase_ =orig_state_dict.pop(__snake_case ) if key[:8] == "encoder.": lowerCamelCase_ =key[8:] if "qkv" in key: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =int(key_split[0][6:] ) - 1 lowerCamelCase_ =int(key_split[3] ) lowerCamelCase_ =model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' ) lowerCamelCase_ =layer.transformer.layer[transformer_num].attention.attention.all_head_size lowerCamelCase_ =( F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: lowerCamelCase_ =val[:dim, :] lowerCamelCase_ =val[dim : dim * 2, :] lowerCamelCase_ =val[-dim:, :] else: lowerCamelCase_ =val[:dim] lowerCamelCase_ =val[dim : dim * 2] lowerCamelCase_ =val[-dim:] else: lowerCamelCase_ =val return orig_state_dict def a_ ( ) -> int: """simple docstring""" lowerCamelCase_ ='''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Dict , __snake_case : int=False ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_mobilevit_config(__snake_case ) # load original state_dict lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) # load 🤗 model if mobilevit_name.startswith('''deeplabv3_''' ): lowerCamelCase_ =MobileViTForSemanticSegmentation(__snake_case ).eval() else: lowerCamelCase_ =MobileViTForImageClassification(__snake_case ).eval() lowerCamelCase_ =convert_state_dict(__snake_case , __snake_case ) model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by MobileViTImageProcessor lowerCamelCase_ =MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowerCamelCase_ 
=image_processor(images=prepare_img() , return_tensors='''pt''' ) lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.logits if mobilevit_name.startswith('''deeplabv3_''' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": lowerCamelCase_ =torch.tensor( [ [[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]], [[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]], [[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowerCamelCase_ =torch.tensor( [ [[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]], [[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]], [[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowerCamelCase_ =torch.tensor( [ [[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]], [[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]], [[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, -2.8_7_5_0]], ] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 ) else: assert logits.shape == (1, 1000) if mobilevit_name == "mobilevit_s": lowerCamelCase_ =torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] ) elif mobilevit_name == "mobilevit_xs": lowerCamelCase_ =torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] ) elif mobilevit_name == "mobilevit_xxs": lowerCamelCase_ =torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__snake_case ) if push_to_hub: lowerCamelCase_ ={ '''mobilevit_s''': '''mobilevit-small''', '''mobilevit_xs''': '''mobilevit-x-small''', '''mobilevit_xxs''': '''mobilevit-xx-small''', '''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''', '''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''', '''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''', } print('''Pushing to the hub...''' ) lowerCamelCase_ =model_mapping[mobilevit_name] image_processor.push_to_hub(__snake_case , organization='''apple''' ) model.push_to_hub(__snake_case , organization='''apple''' ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. 
Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) a_ : str = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
6
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( __snake_case : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def a_ ( __snake_case : List[Any] ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict: """simple docstring""" lowerCamelCase_ ={} for old_key in state_dict.keys(): lowerCamelCase_ =old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' ) else: lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCamelCase_ =state_dict[old_key] return new_dict def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =0 os.makedirs(__snake_case , exist_ok=__snake_case ) for expert in range(__snake_case ): lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(__snake_case ): lowerCamelCase_ =torch.load(__snake_case )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =os.path.join( __snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) torch.save(__snake_case , __snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__snake_case )[0]].dtype ) # Add the last block lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__snake_case ) == 1: 
lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) torch.save(__snake_case , __snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__snake_case , __snake_case ) # Otherwise, let's build the index lowerCamelCase_ ={} for idx, shard in enumerate(__snake_case ): lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' ) lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) for key in shard: lowerCamelCase_ =shard_file # Add the metadata lowerCamelCase_ ={'''total_size''': total_size} lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) return metadata, index if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a_ : Tuple = parser.parse_args() a_ , a_ : int = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_28, args.dtype, ) a_ : Tuple = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
6
1
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def a_ ( __snake_case : int ) -> int:
    """simple docstring"""
    lowerCamelCase_ =0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False


def a_ ( __snake_case : int ) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    lowerCamelCase_ =chain(next_number(__snake_case ) )
    lowerCamelCase_ =number_chain
    while number < 1000_0000:
        lowerCamelCase_ =number_chain
        number *= 10
    return number_chain


def a_ ( __snake_case : int = 1000_0000 ) -> int:
    """simple docstring"""
    for i in range(1 , __snake_case ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(__snake_case )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F"""{solution() = }""")
6
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int =['image_processor', 'tokenizer'] lowercase : int ='LayoutLMv2ImageProcessor' lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ =features['''words'''] lowerCamelCase_ =self.tokenizer( text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, ) # add pixel values lowerCamelCase_ =features.pop('''pixel_values''' ) if return_overflowing_tokens is True: lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] ) 
lowerCamelCase_ =images return encoded_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' ) return images_with_overflow def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
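The `get_overflowing_images` helper above duplicates page images so that each overflow window keeps its source image. A toy sketch of that mapping, with placeholder strings standing in for real images:

```python
# overflow_to_sample_mapping[i] names the original sample that produced
# window i, so the page image is simply repeated once per window.
images = ["img_for_doc0", "img_for_doc1"]   # hypothetical placeholders
overflow_to_sample_mapping = [0, 0, 0, 1]   # doc 0 was split into 3 windows

images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["img_for_doc0"] * 3 + ["img_for_doc1"]
```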
6
1
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example a_ : List[str] = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example a_ : List[str] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def a_ ( __snake_case : list[list[int]] ) -> list[list[int]]: """simple docstring""" lowerCamelCase_ =[] for i in range(len(__snake_case ) ): lowerCamelCase_ =[] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCamelCase_ =0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(__snake_case ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(__snake_case ) - 1: neighbour_count += cells[i + 1][j] if i < len(__snake_case ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCamelCase_ =cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(__snake_case ) return next_generation def a_ ( __snake_case : list[list[int]] , __snake_case : int ) -> list[Image.Image]: """simple docstring""" lowerCamelCase_ =[] for _ in range(__snake_case ): # Create output image lowerCamelCase_ =Image.new('''RGB''' , (len(cells[0] ), len(__snake_case )) ) lowerCamelCase_ =img.load() # Save cells to image for x in range(len(__snake_case ) ): for y in range(len(cells[0] ) ): lowerCamelCase_ =255 - cells[y][x] * 255 lowerCamelCase_ =(colour, colour, colour) # Save image images.append(__snake_case ) lowerCamelCase_ =new_generation(__snake_case ) return images if __name__ == "__main__": a_ : Union[str, Any] = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
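As a quick check of the update rule above, a self-contained one-step sketch on the blinker: the vertical bar of three live cells flips to a horizontal bar.

```python
# Minimal Game of Life step (same rules as above) applied to the blinker.
def step(cells):
    h, w = len(cells), len(cells[0])

    def live_neighbours(i, j):
        return sum(
            cells[i + di][j + dj]
            for di in (-1, 0, 1)
            for dj in (-1, 0, 1)
            if (di, dj) != (0, 0) and 0 <= i + di < h and 0 <= j + dj < w
        )

    return [
        [1 if (n := live_neighbours(i, j)) == 3 or (cells[i][j] and n == 2) else 0
         for j in range(w)]
        for i in range(h)
    ]

blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
assert step(blinker) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
```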
6
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =VQModel lowercase : Union[str, Any] ='sample' @property def lowercase__ ( self, lowerCAmelCase=(32, 32) ): """simple docstring""" lowerCamelCase_ =4 lowerCamelCase_ =3 lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) return {"sample": image} @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={ '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 3, } lowerCamelCase_ =self.dummy_input return init_dict, inputs_dict def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ), 0 ) model.to(lowerCAmelCase ) lowerCamelCase_ =model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(lowerCAmelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size ) lowerCamelCase_ =image.to(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).sample lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
6
1
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def a_ ( __snake_case : Dict , __snake_case : Dict , __snake_case : Optional[Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ ={ '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, oder?''', } # BLEU scores as follows: # "pair": [fairseq, transformers] lowerCamelCase_ ={ '''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''], '''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''], '''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''], '''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''], } lowerCamelCase_ =F'''{src_lang}-{tgt_lang}''' lowerCamelCase_ =F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation. All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) ''' os.makedirs(__snake_case , exist_ok=__snake_case ) lowerCamelCase_ =os.path.join(__snake_case , '''README.md''' ) print(F'''Generating {path}''' ) with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(__snake_case ) # make sure we are under the root of the project a_ : Union[str, Any] = Path(__file__).resolve().parent.parent.parent a_ : Optional[int] = repo_dir / """model_cards""" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: a_ , a_ , a_ : str = model_name.split("""-""") a_ : Tuple = model_cards_dir / """facebook""" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
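The card's recipe scores translations through run_eval.py; for already-generated outputs, the same BLEU number can be computed directly with sacrebleu's Python API. A sketch under that assumption, with placeholder strings where real system outputs and references would go:

```python
# Assumes the sacrebleu package; hypotheses and references are placeholders.
import sacrebleu

hypotheses = ["Machine learning is great, isn't it?"]
references = [["Machine learning is great, isn't it?"]]  # one reference stream

bleu = sacrebleu.corpus_bleu(hypotheses, references)
print(f"BLEU = {bleu.score:.2f}")
```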
6
'''simple docstring''' import datasets from .evaluate import evaluate a_ : List[Any] = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ a_ : List[Any] = """ This metric wraps the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ a_ : Any = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly matches the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': { '''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ), }, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={prediction['''id''']:
prediction['''prediction_text'''] for prediction in predictions} lowerCamelCase_ =[ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase ) return score
6
1
'''simple docstring''' import re import string import numpy as np import datasets a_ : Dict = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ a_ : Optional[Any] = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all numbers before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 25.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 50.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 75.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results[\"exact_match\"], 1)) 100.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"] >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 33.3 """ a_ : int = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Value('''string''', id='''sequence''' ), '''references''': datasets.Value('''string''', id='''sequence''' ), } ), reference_urls=[],
) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, ): """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: lowerCamelCase_ =np.array([re.sub(lowerCAmelCase, '''''', lowerCAmelCase ) for x in predictions] ) lowerCamelCase_ =np.array([re.sub(lowerCAmelCase, '''''', lowerCAmelCase ) for x in references] ) else: lowerCamelCase_ =np.asarray(lowerCAmelCase ) lowerCamelCase_ =np.asarray(lowerCAmelCase ) if ignore_case: lowerCamelCase_ =np.char.lower(lowerCAmelCase ) lowerCamelCase_ =np.char.lower(lowerCAmelCase ) if ignore_punctuation: lowerCamelCase_ =string.punctuation.maketrans('''''', '''''', string.punctuation ) lowerCamelCase_ =np.char.translate(lowerCAmelCase, table=lowerCAmelCase ) lowerCamelCase_ =np.char.translate(lowerCAmelCase, table=lowerCAmelCase ) if ignore_numbers: lowerCamelCase_ =string.digits.maketrans('''''', '''''', string.digits ) lowerCamelCase_ =np.char.translate(lowerCAmelCase, table=lowerCAmelCase ) lowerCamelCase_ =np.char.translate(lowerCAmelCase, table=lowerCAmelCase ) lowerCamelCase_ =predictions == references return {"exact_match": np.mean(lowerCAmelCase ) * 100}
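A compact sketch of the normalization pipeline the compute method above applies: strip the ignored regexes, fold case, drop punctuation, then average elementwise equality.

```python
# Standalone restatement of the exact-match normalization steps above.
import re
import string

def exact_match(preds, refs, regexes_to_ignore=(), ignore_case=True, ignore_punct=True):
    def norm(s):
        for rx in regexes_to_ignore:
            s = re.sub(rx, "", s)
        if ignore_case:
            s = s.lower()
        if ignore_punct:
            s = s.translate(str.maketrans("", "", string.punctuation))
        return s

    matches = [norm(p) == norm(r) for p, r in zip(preds, refs)]
    return 100.0 * sum(matches) / len(matches)

assert exact_match(["cat?", "theater"], ["the cat", "theater"],
                   regexes_to_ignore=["the "]) == 100.0
```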
6
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a_ : Tuple = logging.get_logger(__name__) a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : Tuple = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : Union[str, Any] = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : int = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } a_ : List[Any] = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } a_ : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } a_ : Optional[int] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : Dict = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =VOCAB_FILES_NAMES lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : Dict =DPRContextEncoderTokenizer class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : List[Any] =DPRQuestionEncoderTokenizer a_ : Union[str, Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) a_ : Dict = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(lowerCamelCase__ ) class __UpperCamelCase : def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if titles is None and texts is None: return super().__call__( lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) elif titles is None or texts is None: lowerCamelCase_ =titles if texts is None else texts return super().__call__( lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles] lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.''' lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ ={ '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase ) ] } if return_attention_mask is not False: lowerCamelCase_ =[] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCamelCase_ =attention_mask return self.pad(lowerCAmelCase,
padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ): """simple docstring""" lowerCamelCase_ =reader_input['''input_ids'''] lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ ) lowerCamelCase_ =[] for doc_id in sorted_docs: lowerCamelCase_ =list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase_ =sequence_ids.index(self.pad_token_id ) else: lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =[] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase ) lowerCamelCase_ =[] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' lowerCamelCase_ =end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : int =VOCAB_FILES_NAMES lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION lowercase : int =['input_ids', 'attention_mask'] lowercase : Dict =DPRReaderTokenizer
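The `_get_best_spans` routine above implements a small but reusable algorithm: score every candidate span up to a length budget, then greedily keep the highest-scoring spans that are not contained in (or containing) an already chosen span. A standalone sketch:

```python
# Greedy top-span selection mirroring the logic of `_get_best_spans` above.
def best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = [
        ((s, s + l), start_logits[s] + end_logits[s + l])
        for s in range(len(start_logits))
        for l in range(min(max_answer_length, len(end_logits) - s))
    ]
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (s, e), _ in scores:
        # Reject spans contained in, or containing, an already chosen span.
        if any(ps <= s <= e <= pe or s <= ps <= pe <= e for ps, pe in chosen):
            continue
        chosen.append((s, e))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.2, 0.1], [0.1, 0.3, 1.5, 0.2]))  # [(1, 2), (0, 1)]
```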
6
1
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase_ ={ '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } lowerCamelCase_ =Dataset.from_dict(__snake_case ) return dataset class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ), 2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase ) self.assertEqual(len(lowerCAmelCase ), 2 ) print(lowerCAmelCase ) self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
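The tests above exercise the repo's own minhash_deduplication module; as background, a sketch of the MinHash similarity such deduplication builds on, here via the datasketch library (an assumption for illustration — the module under test may compute its own signatures):

```python
# MinHash signatures approximate Jaccard similarity between token sets.
from datasketch import MinHash

def minhash(tokens, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for tok in tokens:
        m.update(tok.encode("utf8"))
    return m

a = minhash(("a " * 20).split())
b = minhash(("a " * 30).split())  # same single token -> identical signature
c = minhash(("b " * 7).split())

print(a.jaccard(b))  # 1.0: duplicates, like the first two rows above
print(a.jaccard(c))  # 0.0: distinct content
```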
6
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase_ ={ '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } lowerCamelCase_ =Dataset.from_dict(__snake_case ) return dataset class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ), 2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase ) self.assertEqual(len(lowerCAmelCase ), 2 ) print(lowerCAmelCase ) self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
6
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =tempfile.mkdtemp() # fmt: off lowerCamelCase_ =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest'''] # fmt: on lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowerCamelCase_ ={ '''do_resize''': True, '''size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.5, 0.5, 0.5], '''image_std''': [0.5, 0.5, 0.5], } lowerCamelCase_ =os.path.join(self.tmpdirname, lowerCAmelCase ) with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp: json.dump(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =[np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )] lowerCamelCase_ =[Image.fromarray(np.moveaxis(lowerCAmelCase, 0, -1 ) ) for x in image_inputs] return image_inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =self.get_image_processor() lowerCamelCase_ =VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ =VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ =self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' ) lowerCamelCase_ =self.get_image_processor(do_normalize=lowerCAmelCase, padding_value=1.0 ) lowerCamelCase_ =VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCAmelCase, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast) ) 
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_image_processor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase ) lowerCamelCase_ =self.prepare_image_inputs() lowerCamelCase_ =image_processor(lowerCAmelCase, return_tensors='''np''' ) lowerCamelCase_ =processor(images=lowerCAmelCase, return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_image_processor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase ) lowerCamelCase_ ='''lower newer''' lowerCamelCase_ =processor(text=lowerCAmelCase ) lowerCamelCase_ =tokenizer(lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_image_processor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase ) lowerCamelCase_ ='''lower newer''' lowerCamelCase_ =self.prepare_image_inputs() lowerCamelCase_ =processor(text=lowerCAmelCase, images=lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with self.assertRaises(lowerCAmelCase ): processor() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_image_processor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase ) lowerCamelCase_ =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase_ =processor.batch_decode(lowerCAmelCase ) lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_image_processor() lowerCamelCase_ =self.get_tokenizer() lowerCamelCase_ =VisionTextDualEncoderProcessor(tokenizer=lowerCAmelCase, image_processor=lowerCAmelCase ) lowerCamelCase_ ='''lower newer''' lowerCamelCase_ =self.prepare_image_inputs() lowerCamelCase_ =processor(text=lowerCAmelCase, images=lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
6
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a_ : Any = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
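The `_LazyModule` wiring above defers heavy imports until an attribute is first accessed. A minimal sketch of the same idea using the module-level `__getattr__` hook from PEP 562 — an illustration, not how transformers implements it internally:

```python
# Would live in a package's __init__.py; importlib resolves submodules lazily.
import importlib

_IMPORT_MAP = {"TrOCRProcessor": ".processing_trocr"}  # attribute -> submodule

def __getattr__(name):
    if name in _IMPORT_MAP:
        module = importlib.import_module(_IMPORT_MAP[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```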
6
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Tuple ='facebook/bart-large-mnli' lowercase : Tuple =( 'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ' 'should be the text to classify, and `labels`, which should be the list of labels to use for classification. ' 'It returns the most likely label in the list of provided `labels` for the input text.' ) lowercase : Any ='text_classifier' lowercase : Any =AutoTokenizer lowercase : Dict =AutoModelForSequenceClassification lowercase : Any =['text', ['text']] lowercase : int =['text'] def lowercase__ ( self ): """simple docstring""" super().setup() lowerCamelCase_ =self.model.config lowerCamelCase_ =-1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): lowerCamelCase_ =int(lowerCAmelCase ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =labels return self.pre_processor( [text] * len(lowerCAmelCase ), [f'''This example is {label}''' for label in labels], return_tensors='''pt''', padding='''max_length''', ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =outputs.logits lowerCamelCase_ =torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
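The tool above scores each candidate label by the entailment logit of the NLI hypothesis "This example is {label}" against the input text. The same trick is exposed through the zero-shot-classification pipeline; a usage sketch (downloads the checkpoint on first run):

```python
# Zero-shot classification with the same facebook/bart-large-mnli backbone.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput.",
    candidate_labels=["hardware", "cooking", "politics"],
)
print(result["labels"][0])  # most likely label, e.g. "hardware"
```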
6
'''simple docstring''' from collections import defaultdict from math import gcd def a_ ( __snake_case : int = 150_0000 ) -> int: """simple docstring""" lowerCamelCase_ =defaultdict(__snake_case ) lowerCamelCase_ =2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ): if gcd(__snake_case , __snake_case ) > 1: continue lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(__snake_case , limit + 1 , __snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"""{solution() = }""")
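The loop above enumerates primitive Pythagorean triples via Euclid's formula: for coprime m > n of opposite parity, (m² − n², 2mn, m² + n²) is a primitive triple with perimeter 2m(m + n). A short sketch generating the first few:

```python
# Euclid's formula for primitive Pythagorean triples, as used above.
from math import gcd

def primitive_triples(limit):
    m = 2
    while 2 * m * (m + 1) <= limit:  # smallest perimeter for this m is 2m(m+1)
        for n in range((m % 2) + 1, m, 2):  # opposite parity
            if gcd(m, n) == 1:
                yield (m * m - n * n, 2 * m * n, m * m + n * n)
        m += 1

for a, b, c in primitive_triples(40):
    assert a * a + b * b == c * c
    if a + b + c <= 40:
        print((a, b, c), "perimeter", a + b + c)  # (3, 4, 5), (5, 12, 13), (15, 8, 17)
```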
6
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer a_ : Dict = logging.get_logger(__name__) a_ : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : Optional[Any] = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } a_ : Dict = { """junnyu/roformer_chinese_small""": 15_36, """junnyu/roformer_chinese_base""": 15_36, """junnyu/roformer_chinese_char_small""": 5_12, """junnyu/roformer_chinese_char_base""": 5_12, """junnyu/roformer_small_discriminator""": 1_28, """junnyu/roformer_small_generator""": 1_28, } a_ : List[Any] = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] =VOCAB_FILES_NAMES lowercase : int =PRETRAINED_VOCAB_FILES_MAP lowercase : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =PRETRAINED_INIT_CONFIGURATION lowercase : str =RoFormerTokenizer def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase="[UNK]", lowerCAmelCase="[SEP]", lowerCAmelCase="[PAD]", lowerCAmelCase="[CLS]", lowerCAmelCase="[MASK]", lowerCAmelCase=True, lowerCAmelCase=None, **lowerCAmelCase, ): """simple docstring""" super().__init__( lowerCAmelCase, tokenizer_file=lowerCAmelCase, do_lower_case=lowerCAmelCase, unk_token=lowerCAmelCase, sep_token=lowerCAmelCase, pad_token=lowerCAmelCase, cls_token=lowerCAmelCase, mask_token=lowerCAmelCase, tokenize_chinese_chars=lowerCAmelCase, strip_accents=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''', lowerCAmelCase ) != do_lower_case or pre_tok_state.get('''strip_accents''', lowerCAmelCase ) != strip_accents ): lowerCamelCase_ =getattr(lowerCAmelCase, pre_tok_state.pop('''type''' ) ) lowerCamelCase_ =do_lower_case lowerCamelCase_ =strip_accents lowerCamelCase_ =pre_tok_class(**lowerCAmelCase ) lowerCamelCase_ =do_lower_case def __getstate__( self ): """simple docstring""" lowerCamelCase_ =self.__dict__.copy() lowerCamelCase_ =BertPreTokenizer() return state def __setstate__( self, 
lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =d lowerCamelCase_ =self.__dict__['''_tokenizer'''].get_vocab() lowerCamelCase_ =PreTokenizer.custom(JiebaPreTokenizer(lowerCAmelCase ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =[self.sep_token_id] lowerCamelCase_ =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =self._tokenizer.model.save(lowerCAmelCase, name=lowerCAmelCase ) return tuple(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=False, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =BertPreTokenizer() return super().save_pretrained(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase )
6
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ : Tuple = 16 a_ : Optional[int] = 32 def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__snake_case : int ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase_ =datasets.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase_ =16 elif accelerator.mixed_precision != "no": lowerCamelCase_ =8 else: lowerCamelCase_ =None return tokenizer.pad( __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCamelCase_ =DataLoader( tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) lowerCamelCase_ =DataLoader( tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ : Tuple = mocked_dataloaders # noqa: F811 def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1": lowerCamelCase_ =2 # Initialize accelerator lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase_ =config['''lr'''] lowerCamelCase_ =int(config['''num_epochs'''] ) lowerCamelCase_ =int(config['''seed'''] ) lowerCamelCase_ =int(config['''batch_size'''] ) lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__snake_case ) def inner_training_loop(__snake_case : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase_ =model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case ) lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case ) # Instantiate scheduler lowerCamelCase_ =get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.loss accelerator.backward(__snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase_ =model(**__snake_case ) lowerCamelCase_ =outputs.logits.argmax(dim=-1 ) lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__snake_case , references=__snake_case , ) lowerCamelCase_ =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __snake_case ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowerCamelCase_ =parser.parse_args() lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
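The retry mechanics of `find_executable_batch_size` are easy to lose in the training boilerplate above; stripped to its core, the decorator works like this (a minimal sketch — the `train` body is hypothetical, while the decorator and `Accelerator` are the real `accelerate` APIs used above):

from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size

accelerator = Accelerator()

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # On a CUDA out-of-memory error the decorator frees cached memory and
    # re-invokes this function with the batch size halved (128 -> 64 -> ...).
    accelerator.free_memory()
    print(f"trying batch_size={batch_size}")

train()  # called with no arguments; the decorator injects batch_size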
6
1
'''simple docstring''' from itertools import product def a_ ( __snake_case : int , __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =sides_number lowerCamelCase_ =max_face_number * dice_number lowerCamelCase_ =[0] * (max_total + 1) lowerCamelCase_ =1 lowerCamelCase_ =range(__snake_case , max_face_number + 1 ) for dice_numbers in product(__snake_case , repeat=__snake_case ): lowerCamelCase_ =sum(__snake_case ) totals_frequencies[total] += 1 return totals_frequencies def a_ ( ) -> float: """simple docstring""" lowerCamelCase_ =total_frequency_distribution( sides_number=4 , dice_number=9 ) lowerCamelCase_ =total_frequency_distribution( sides_number=6 , dice_number=6 ) lowerCamelCase_ =0 lowerCamelCase_ =9 lowerCamelCase_ =4 * 9 lowerCamelCase_ =6 for peter_total in range(__snake_case , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) lowerCamelCase_ =(4**9) * (6**6) lowerCamelCase_ =peter_wins_count / total_games_number lowerCamelCase_ =round(__snake_case , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
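As a quick sanity check of the frequency table the helper above builds: for two six-sided dice the same convolution can be written out by hand, and the counts must sum to 6**2 (a standalone sketch, independent of the solution code):

from itertools import product

frequencies = [0] * 13  # totals 0..12; totals below 2 stay at 0
for a, b in product(range(1, 7), repeat=2):
    frequencies[a + b] += 1

assert sum(frequencies) == 6**2
assert frequencies[7] == 6  # 7 is the most likely total of two dice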
6
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py a_ : List[str] = """src/diffusers""" # Matches is_xxx_available() a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") a_ : Optional[Any] = """ {0} = None """ a_ : List[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ a_ : Optional[Any] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def a_ ( __snake_case : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =_re_backend.findall(__snake_case ) if len(__snake_case ) == 0: return None return "_and_".join(__snake_case ) def a_ ( ) -> Optional[int]: """simple docstring""" with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase_ =0 lowerCamelCase_ ={} # Go through the end of the file while line_index < len(__snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase_ =find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 lowerCamelCase_ =[] # Until we unindent, add backend objects to the list while line_index < len(__snake_case ) and len(lines[line_index] ) > 1: lowerCamelCase_ =lines[line_index] lowerCamelCase_ =_re_single_line_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__snake_case ) > 0: lowerCamelCase_ =objects else: line_index += 1 return backend_specific_objects def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(__snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(__snake_case , __snake_case ) else: return DUMMY_CLASS.format(__snake_case , __snake_case ) def a_ ( __snake_case : Tuple=None ) -> List[str]: """simple docstring""" if backend_specific_objects is None: lowerCamelCase_ =read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase_ ={} for backend, objects in backend_specific_objects.items(): lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']''' lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] ) lowerCamelCase_ =dummy_file return dummy_files def a_ ( __snake_case : Dict=False ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase_ ={'''torch''': '''pt'''} # Locate actual dummy modules and read their 
content. lowerCamelCase_ =os.path.join(__snake_case , '''utils''' ) lowerCamelCase_ ={ backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase_ ={} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.read() else: lowerCamelCase_ ='''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main ''' '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` ''' '''to fix this.''' ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a_ : Tuple = parser.parse_args() check_dummies(args.fix_and_overwrite)
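To make the templating above concrete, rendering the class template for a torch-only object produces a placeholder whose constructor raises unless the backend is installed. A standalone sketch of the same `str.format` step (template abridged; the class name is illustrative):

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

# Mirrors what create_dummy_object does for a class name:
print(DUMMY_CLASS.format("UNet2DModel", '["torch"]'))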
6
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a_ : Tuple = { """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""], """tokenization_xlm""": ["""XLMTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Union[str, Any] = [ """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMForMultipleChoice""", """XLMForQuestionAnswering""", """XLMForQuestionAnsweringSimple""", """XLMForSequenceClassification""", """XLMForTokenClassification""", """XLMModel""", """XLMPreTrainedModel""", """XLMWithLMHeadModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = [ """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMForMultipleChoice""", """TFXLMForQuestionAnsweringSimple""", """TFXLMForSequenceClassification""", """TFXLMForTokenClassification""", """TFXLMMainLayer""", """TFXLMModel""", """TFXLMPreTrainedModel""", """TFXLMWithLMHeadModel""", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys a_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
'''simple docstring''' a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def a_ ( __snake_case : int ) -> int: """simple docstring""" lowerCamelCase_ =0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000] number //= 10_0000 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution a_ : list[bool | None] = [None] * 10_00_00_00 a_ : List[Any] = True a_ : Optional[Any] = False def a_ ( __snake_case : int ) -> bool: """simple docstring""" if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCamelCase_ =chain(next_number(__snake_case ) ) lowerCamelCase_ =number_chain while number < 1000_0000: lowerCamelCase_ =number_chain number *= 10 return number_chain def a_ ( __snake_case : int = 1000_0000 ) -> int: """simple docstring""" for i in range(1 , __snake_case ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution() = }""")
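A worked chain makes the two endpoints described in the comments concrete: 44 → 32 → 13 → 10 → 1 joins the chain ending in 1, while 85 → 89 loops at 89. A minimal check, independent of the memoized solution above:

def next_term(n: int) -> int:
    # Sum of the squares of the decimal digits of n.
    return sum(int(d) ** 2 for d in str(n))

chain = [44]
while chain[-1] not in (1, 89):
    chain.append(next_term(chain[-1]))

assert chain == [44, 32, 13, 10, 1]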
6
1
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Tuple =(DDPMScheduler,) def lowercase__ ( self, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={ '''num_train_timesteps''': 1_000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowerCAmelCase ) return config def lowercase__ ( self ): """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1], [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase, beta_end=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase, prediction_type=lowerCAmelCase, sample_max_value=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self.dummy_model() lowerCamelCase_ =self.dummy_sample_deter lowerCamelCase_ =torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase ) ): # 1. predict noise residual lowerCamelCase_ =model(lowerCAmelCase, lowerCAmelCase ) # 2. 
predict previous mean of sample x_t-1 lowerCamelCase_ =scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, generator=lowerCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCamelCase_ =pred_prev_sample lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) ) lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self.dummy_model() lowerCamelCase_ =self.dummy_sample_deter lowerCamelCase_ =torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase ) ): # 1. predict noise residual lowerCamelCase_ =model(lowerCAmelCase, lowerCAmelCase ) # 2. predict previous mean of sample x_t-1 lowerCamelCase_ =scheduler.step(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, generator=lowerCAmelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowerCamelCase_ =pred_prev_sample lowerCamelCase_ =torch.sum(torch.abs(lowerCAmelCase ) ) lowerCamelCase_ =torch.mean(torch.abs(lowerCAmelCase ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase ) lowerCamelCase_ =scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase ): if i == len(lowerCAmelCase ) - 1: lowerCamelCase_ =-1 else: lowerCamelCase_ =timesteps[i + 1] lowerCamelCase_ =scheduler.previous_timestep(lowerCAmelCase ) lowerCamelCase_ =prev_t.item() self.assertEqual(lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[100, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase, msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[100, 87, 50, 1, 0] lowerCamelCase_ =len(lowerCAmelCase ) with self.assertRaises(lowerCAmelCase, msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase, timesteps=lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.scheduler_classes[0] lowerCamelCase_ =self.get_scheduler_config() lowerCamelCase_ =scheduler_class(**lowerCAmelCase ) lowerCamelCase_ =[scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase, msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''', ): scheduler.set_timesteps(timesteps=lowerCAmelCase )
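The `_get_variance` values asserted in the test above follow the DDPM posterior variance (the "fixed_small" option); in the paper's notation, with $\alpha_t = 1 - \beta_t$ and $\bar{\alpha}_t = \prod_{s=1}^{t} \alpha_s$,

$$\tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t,$$

which is $0$ at $t = 0$ and approaches $\beta_t \approx 0.02$ at $t = 999$ for the linear schedule configured here, matching the three asserted values.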
6
'''simple docstring''' from argparse import ArgumentParser from . import BaseTransformersCLICommand def a_ ( __snake_case : Tuple ) -> str: """simple docstring""" return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class __UpperCamelCase ( lowerCamelCase__ ): @staticmethod def lowercase__ ( lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =parser.add_parser('''download''' ) download_parser.add_argument( '''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' ) download_parser.add_argument( '''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' ) download_parser.add_argument( '''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', ) download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' ) download_parser.set_defaults(func=lowerCAmelCase ) def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =model lowerCamelCase_ =cache lowerCamelCase_ =force lowerCamelCase_ =trust_remote_code def lowercase__ ( self ): """simple docstring""" from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
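The parser above registers under the `transformers-cli` entry point, so a typical invocation (model name illustrative) is `transformers-cli download --cache-dir ./models bert-base-cased`, which warms the cache through the `AutoModel.from_pretrained` and `AutoTokenizer.from_pretrained` calls in the command's final method.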
6
1
'''simple docstring''' import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): @property def lowercase__ ( self ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ort.SessionOptions() lowerCamelCase_ =False return options def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowerCamelCase_ =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''', revision='''onnx''', safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ ='''A red cat sitting on a park bench''' lowerCamelCase_ =np.random.RandomState(0 ) lowerCamelCase_ =pipe( prompt=lowerCAmelCase, image=lowerCAmelCase, mask_image=lowerCAmelCase, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase, output_type='''np''', ) lowerCamelCase_ =output.images lowerCamelCase_ =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowerCamelCase_ =LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''', subfolder='''scheduler''', revision='''onnx''' ) lowerCamelCase_ =OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''', revision='''onnx''', scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ ='''A red cat sitting on a park bench''' lowerCamelCase_ =np.random.RandomState(0 ) lowerCamelCase_ =pipe( prompt=lowerCAmelCase, image=lowerCAmelCase, mask_image=lowerCAmelCase, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase, output_type='''np''', ) lowerCamelCase_ =output.images lowerCamelCase_ =images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) lowerCamelCase_ =np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 
0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
6
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features a_ : List[str] = logging.get_logger(__name__) a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} ) lowercase : str =field( default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} ) lowercase : int =field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) lowercase : int =field( default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , ) lowercase : int =field( default=64 , metadata={ 'help': ( 'The maximum number of tokens for the question. Questions longer than this will ' 'be truncated to this length.' ) } , ) lowercase : int =field( default=30 , metadata={ 'help': ( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ) } , ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) lowercase : bool =field( default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} ) lowercase : float =field( default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} ) lowercase : int =field( default=0 , metadata={ 'help': ( 'language id of input for language-specific xlm models (see' ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)' ) } , ) lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='train' lowercase : Any ='dev' class __UpperCamelCase ( lowerCamelCase__ ): lowercase : SquadDataTrainingArguments lowercase : List[SquadFeatures] lowercase : Split lowercase : bool def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ): """simple docstring""" lowerCamelCase_ =args lowerCamelCase_ =is_language_sensitive lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowerCAmelCase, lowerCAmelCase ): try: lowerCamelCase_ =Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) lowerCamelCase_ =mode # Load data features from cache or dataset file lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1''' lowerCamelCase_ =os.path.join( 
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowerCamelCase_ =cached_features_file + '''.lock''' with FileLock(lowerCAmelCase ): if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache: lowerCamelCase_ =time.time() lowerCamelCase_ =torch.load(lowerCAmelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowerCamelCase_ =self.old_features['''features'''] lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase ) lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' ''' future run''' ) else: if mode == Split.dev: lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir ) else: lowerCamelCase_ =self.processor.get_train_examples(args.data_dir ) lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features( examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, ) lowerCamelCase_ =time.time() torch.save( {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ): """simple docstring""" return len(self.features ) def __getitem__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.features[i] lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float ) lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float ) lowerCamelCase_ ={ '''input_ids''': input_ids, '''attention_mask''': attention_mask, '''token_type_ids''': token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} ) if self.args.version_2_with_negative: inputs.update({'''is_impossible''': is_impossible} ) if self.is_language_sensitive: inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long ) lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long ) inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} ) return inputs
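The lock-then-check pattern above is the standard way to make feature caching safe when several distributed workers start together; reduced to its core (path hypothetical; `filelock` and `torch` are the same libraries imported above):

import os

import torch
from filelock import FileLock

def build_features():
    # Stand-in for the expensive squad_convert_examples_to_features step.
    return list(range(10))

cache_file = "cached_features.pt"  # hypothetical path
with FileLock(cache_file + ".lock"):
    # Only one process builds the cache; the others block on the lock
    # and then simply load the finished file.
    if os.path.exists(cache_file):
        features = torch.load(cache_file)
    else:
        features = build_features()
        torch.save(features, cache_file)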
6
1
'''simple docstring''' from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def a_ ( __snake_case : int , __snake_case : int , __snake_case : float = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" lowerCamelCase_ =tau * frequency / samplerate lowerCamelCase_ =sin(__snake_case ) lowerCamelCase_ =cos(__snake_case ) lowerCamelCase_ =_sin / (2 * q_factor) lowerCamelCase_ =(1 - _cos) / 2 lowerCamelCase_ =1 - _cos lowerCamelCase_ =1 + alpha lowerCamelCase_ =-2 * _cos lowerCamelCase_ =1 - alpha lowerCamelCase_ =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def a_ ( __snake_case : int , __snake_case : int , __snake_case : float = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" lowerCamelCase_ =tau * frequency / samplerate lowerCamelCase_ =sin(__snake_case ) lowerCamelCase_ =cos(__snake_case ) lowerCamelCase_ =_sin / (2 * q_factor) lowerCamelCase_ =(1 + _cos) / 2 lowerCamelCase_ =-1 - _cos lowerCamelCase_ =1 + alpha lowerCamelCase_ =-2 * _cos lowerCamelCase_ =1 - alpha lowerCamelCase_ =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def a_ ( __snake_case : int , __snake_case : int , __snake_case : float = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" lowerCamelCase_ =tau * frequency / samplerate lowerCamelCase_ =sin(__snake_case ) lowerCamelCase_ =cos(__snake_case ) lowerCamelCase_ =_sin / (2 * q_factor) lowerCamelCase_ =_sin / 2 lowerCamelCase_ =0 lowerCamelCase_ =-ba lowerCamelCase_ =1 + alpha lowerCamelCase_ =-2 * _cos lowerCamelCase_ =1 - alpha lowerCamelCase_ =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def a_ ( __snake_case : int , __snake_case : int , __snake_case : float = 1 / sqrt(2 ) ) -> IIRFilter: """simple docstring""" lowerCamelCase_ =tau * frequency / samplerate lowerCamelCase_ =sin(__snake_case ) lowerCamelCase_ =cos(__snake_case ) lowerCamelCase_ =_sin / (2 * q_factor) lowerCamelCase_ =1 - alpha lowerCamelCase_ =-2 * _cos lowerCamelCase_ =1 + alpha lowerCamelCase_ =IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def a_ ( __snake_case : int , __snake_case : int , __snake_case : float , __snake_case : float = 1 / sqrt(2 ) , ) -> IIRFilter: """simple docstring""" lowerCamelCase_ =tau * frequency / samplerate lowerCamelCase_ =sin(__snake_case ) lowerCamelCase_ =cos(__snake_case ) lowerCamelCase_ =_sin / (2 * q_factor) lowerCamelCase_ =10 ** (gain_db / 40) lowerCamelCase_ =1 + alpha * big_a lowerCamelCase_ =-2 * _cos lowerCamelCase_ =1 - alpha * big_a lowerCamelCase_ =1 + alpha / big_a lowerCamelCase_ =-2 * _cos lowerCamelCase_ =1 - alpha / big_a lowerCamelCase_ =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def a_ ( __snake_case : int , __snake_case : int , __snake_case : float , __snake_case : float = 1 / sqrt(2 ) , ) -> IIRFilter: """simple docstring""" lowerCamelCase_ =tau * frequency / samplerate lowerCamelCase_ =sin(__snake_case ) lowerCamelCase_ =cos(__snake_case ) lowerCamelCase_ =_sin / (2 * q_factor) lowerCamelCase_ =10 ** (gain_db / 40) lowerCamelCase_ =(big_a + 1) - (big_a - 1) * _cos lowerCamelCase_ =(big_a + 1) + (big_a - 1) * _cos lowerCamelCase_ =(big_a - 1) - (big_a + 1) * _cos lowerCamelCase_ =(big_a - 1) + (big_a + 1) * _cos lowerCamelCase_ =2 * sqrt(__snake_case ) * alpha lowerCamelCase_ =big_a * (pmc + aaa) lowerCamelCase_ =2 * big_a * mpc lowerCamelCase_ =big_a * (pmc - aaa) lowerCamelCase_ =ppmc + aaa lowerCamelCase_ =-2 * pmpc lowerCamelCase_ =ppmc - aaa 
lowerCamelCase_ =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def a_ ( __snake_case : int , __snake_case : int , __snake_case : float , __snake_case : float = 1 / sqrt(2 ) , ) -> IIRFilter: """simple docstring""" lowerCamelCase_ =tau * frequency / samplerate lowerCamelCase_ =sin(__snake_case ) lowerCamelCase_ =cos(__snake_case ) lowerCamelCase_ =_sin / (2 * q_factor) lowerCamelCase_ =10 ** (gain_db / 40) lowerCamelCase_ =(big_a + 1) - (big_a - 1) * _cos lowerCamelCase_ =(big_a + 1) + (big_a - 1) * _cos lowerCamelCase_ =(big_a - 1) - (big_a + 1) * _cos lowerCamelCase_ =(big_a - 1) + (big_a + 1) * _cos lowerCamelCase_ =2 * sqrt(__snake_case ) * alpha lowerCamelCase_ =big_a * (ppmc + aaa) lowerCamelCase_ =-2 * big_a * pmpc lowerCamelCase_ =big_a * (ppmc - aaa) lowerCamelCase_ =pmc + aaa lowerCamelCase_ =2 * mpc lowerCamelCase_ =pmc - aaa lowerCamelCase_ =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
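Each helper above packages one row of the Audio EQ Cookbook into a ready-made two-pole `IIRFilter`, and usage is the same for all of them. A sketch assuming the upstream names `make_lowpass` and `IIRFilter.process`, which appear here under placeholder names:

from math import sin, tau

# Cut everything above ~1 kHz at a 48 kHz sample rate.
filt = make_lowpass(frequency=1_000, samplerate=48_000)

# Feed a 2 kHz sine through the filter one sample at a time;
# the output amplitude should be visibly attenuated.
out = [filt.process(sin(tau * 2_000 * n / 48_000)) for n in range(480)]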
6
'''simple docstring''' import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : Any = logging.get_logger(__name__) a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/""" a_ : Any = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def a_ ( __snake_case : int ) -> Any: """simple docstring""" if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: lowerCamelCase_ =key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ ={} import re lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) lowerCamelCase_ =re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case ) elif re_encoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case ) elif re_encoder_block_proj_out.fullmatch(__snake_case ): lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ 
=re_decoder_block_conv_out.sub(__snake_case , __snake_case ) elif re_decoder_block_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case ) elif re_decoder_block_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case ) elif re_prior_cond_resnet.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2 lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]] lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' lowerCamelCase_ =prefix + resnet_block lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case ) elif re_prior_cond_proj_in.fullmatch(__snake_case ): lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case ) lowerCamelCase_ =regex_match.groups() lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case ) # keep original key else: lowerCamelCase_ =original_key lowerCamelCase_ =replace_key(__snake_case ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) lowerCamelCase_ =original_key lowerCamelCase_ =original_key lowerCamelCase_ =value return new_dict @torch.no_grad() def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]: """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case ) open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content ) lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]] lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case ) lowerCamelCase_ =JukeboxModel(__snake_case ) lowerCamelCase_ =[] lowerCamelCase_ ={} for i, dict_name 
in enumerate(__snake_case ): lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model'''] lowerCamelCase_ ={} for k in old_dic.keys(): if k.endswith('''.b''' ): lowerCamelCase_ =old_dic[k] elif k.endswith('''.w''' ): lowerCamelCase_ =old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: lowerCamelCase_ =old_dic[k] else: lowerCamelCase_ =old_dic[k] lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}''' lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case ) weight_dict.append(__snake_case ) lowerCamelCase_ =weight_dict.pop(0 ) model.vqvae.load_state_dict(__snake_case ) for i in range(len(__snake_case ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile: json.dump(__snake_case , __snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) return weight_dict if __name__ == "__main__": a_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) a_ : Optional[int] = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
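Tracing one checkpoint key through the first encoder rule shows the index arithmetic the regexes implement (the key itself is illustrative):

import re

pattern = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
key = "encoders.0.level_blocks.1.model.2.3.weight"

groups = pattern.match(key).groups()                # ('0', '1', '2', '3', 'weight')
block_index = int(groups[2]) * 2 + int(groups[3])   # 2 * 2 + 3 = 7
new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"

assert new_key == "encoders.0.level_blocks.1.downsample_block.7.weight"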
6
1
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path a_ : int = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) a_ : Tuple = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} a_ : Optional[int] = """zero2""" a_ : str = """zero3""" a_ : Tuple = [ZEROa, ZEROa] def a_ ( __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param lowerCamelCase_ =parameterized.to_safe_name('''_'''.join(str(__snake_case ) for x in param.args ) ) return F'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test a_ : str = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __UpperCamelCase ( lowerCamelCase__ ): @parameterized.expand(lowerCAmelCase, name_func=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=lowerCAmelCase, model=lowerCAmelCase, distributed=lowerCAmelCase, fpaa=lowerCAmelCase, ) @require_torch_multi_gpu @parameterized.expand(lowerCAmelCase, name_func=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=lowerCAmelCase, model=lowerCAmelCase, distributed=lowerCAmelCase, fpaa=lowerCAmelCase, ) @parameterized.expand(lowerCAmelCase, name_func=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=lowerCAmelCase, model=lowerCAmelCase, distributed=lowerCAmelCase, fpaa=lowerCAmelCase, ) @require_torch_multi_gpu @parameterized.expand(lowerCAmelCase, name_func=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" self.run_and_check( stage=lowerCAmelCase, model=lowerCAmelCase, distributed=lowerCAmelCase, fpaa=lowerCAmelCase, ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" pass def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 10, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = True, ): """simple docstring""" lowerCamelCase_ =models[model] lowerCamelCase_ =self.run_trainer( stage=lowerCAmelCase, model_name=lowerCAmelCase, eval_steps=lowerCAmelCase, num_train_epochs=1, distributed=lowerCAmelCase, fpaa=lowerCAmelCase, ) self.do_checks(lowerCAmelCase ) return output_dir def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 10, lowerCAmelCase = 1, lowerCAmelCase = True, 
lowerCAmelCase = True, ): """simple docstring""" lowerCamelCase_ =self.get_auto_remove_tmp_dir('''./xxx''', after=lowerCAmelCase ) lowerCamelCase_ =f''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowerCAmelCase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files lowerCamelCase_ =f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() lowerCamelCase_ =[f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] lowerCamelCase_ =self.get_launcher(lowerCAmelCase ) lowerCamelCase_ =launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCAmelCase, env=self.get_env() ) return output_dir def lowercase__ ( self, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =min(2, get_gpu_count() ) if distributed else 1 return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
6
'''simple docstring''' def a_ ( __snake_case : int = 1000 ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =1, 1 lowerCamelCase_ =2 while True: lowerCamelCase_ =0 lowerCamelCase_ =fa + fa lowerCamelCase_, lowerCamelCase_ =fa, f index += 1 for _ in str(f ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
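The digit-counting loop above can be cross-checked with Binet's formula: since $F_k \approx \varphi^k/\sqrt{5}$, the first index with $n$ digits satisfies $k \ge (n - 1 + \log_{10}\sqrt{5})/\log_{10}\varphi$. A closed-form sketch, with indexing matching $F_1 = F_2 = 1$ as above:

from math import ceil, log10, sqrt

def first_fib_index_with_digits(n: int = 1000) -> int:
    # Smallest k with F_k >= 10**(n - 1), from F_k ~ phi**k / sqrt(5).
    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(5) / 2) / log10(phi))

assert first_fib_index_with_digits(3) == 12       # F_12 = 144
assert first_fib_index_with_digits(1000) == 4782  # the known n = 1000 answer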
6
1
'''simple docstring''' import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py a_ : List[str] = """src/diffusers""" # Matches is_xxx_available() a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""") # Matches from xxx import bla a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") a_ : Optional[Any] = """ {0} = None """ a_ : List[Any] = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, {1}) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, {1}) """ a_ : Optional[Any] = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def a_ ( __snake_case : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =_re_backend.findall(__snake_case ) if len(__snake_case ) == 0: return None return "_and_".join(__snake_case ) def a_ ( ) -> Optional[int]: """simple docstring""" with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.readlines() # Get to the point we do the actual imports for type checking lowerCamelCase_ =0 lowerCamelCase_ ={} # Go through the end of the file while line_index < len(__snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCamelCase_ =find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 lowerCamelCase_ =[] # Until we unindent, add backend objects to the list while line_index < len(__snake_case ) and len(lines[line_index] ) > 1: lowerCamelCase_ =lines[line_index] lowerCamelCase_ =_re_single_line_import.search(__snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__snake_case ) > 0: lowerCamelCase_ =objects else: line_index += 1 return backend_specific_objects def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(__snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(__snake_case , __snake_case ) else: return DUMMY_CLASS.format(__snake_case , __snake_case ) def a_ ( __snake_case : Tuple=None ) -> List[str]: """simple docstring""" if backend_specific_objects is None: lowerCamelCase_ =read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCamelCase_ ={} for backend, objects in backend_specific_objects.items(): lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']''' lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] ) lowerCamelCase_ =dummy_file return dummy_files def a_ ( __snake_case : Dict=False ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCamelCase_ ={'''torch''': '''pt'''} # Locate actual dummy modules and read their content. lowerCamelCase_ =os.path.join(__snake_case , '''utils''' ) lowerCamelCase_ ={ backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' ) for backend in dummy_files.keys() } lowerCamelCase_ ={} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__snake_case ): with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCamelCase_ =f.read() else: lowerCamelCase_ ='''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main ''' '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` ''' '''to fix this.''' ) if __name__ == "__main__": a_ : Tuple = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a_ : Tuple = parser.parse_args() check_dummies(args.fix_and_overwrite)
6
'''simple docstring''' import importlib import os import sys # This is required to make the module imports work (when the python process is running from the root of the repo) sys.path.append(""".""") def a_ ( __snake_case : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ =test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F'''{test_file} instead.''' ) lowerCamelCase_ =components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )] lowerCamelCase_ ='''.'''.join(__snake_case ) return test_module_path def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =get_module_path(__snake_case ) lowerCamelCase_ =importlib.import_module(__snake_case ) return test_module def a_ ( __snake_case : Dict ) -> Tuple: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(__snake_case , __snake_case ) ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =get_test_module(__snake_case ) for attr in dir(__snake_case ): lowerCamelCase_ =getattr(__snake_case , __snake_case ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] ) if len(__snake_case ) > 0: test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : List[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =test_class() if hasattr(__snake_case , '''setUp''' ): test.setUp() lowerCamelCase_ =None if hasattr(__snake_case , '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None: lowerCamelCase_ =test.model_tester.__class__ return model_tester def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ =[] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any: """simple docstring""" lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case ) lowerCamelCase_ =[] for test_class in test_classes: lowerCamelCase_ =get_model_tester_from_test_class(__snake_case ) if tester_class is not None: tester_classes.append(__snake_case ) # sort with class names return sorted(__snake_case , key=lambda __snake_case : x.__name__ ) def a_ ( __snake_case : Tuple ) -> Tuple: """simple docstring""" lowerCamelCase_ =get_test_classes(__snake_case ) lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes} return test_tester_mapping def a_ ( __snake_case : Dict ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_test_mapping def a_ ( __snake_case : Optional[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ =get_model_classes(__snake_case ) lowerCamelCase_ ={ model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes } return model_to_tester_mapping def a_ ( __snake_case : List[str] ) -> List[Any]: """simple docstring""" if isinstance(__snake_case , __snake_case ): return o elif isinstance(__snake_case , __snake_case ): return o.__name__ elif isinstance(__snake_case , (list, tuple) ): return [to_json(__snake_case ) for x in o] elif isinstance(__snake_case , __snake_case ): return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()} else: return o
6
1
'''simple docstring''' from __future__ import annotations import math def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" if num <= 0: lowerCamelCase_ =F'''{num}: Invalid input, please enter a positive integer.''' raise ValueError(__snake_case ) lowerCamelCase_ =[True] * (num + 1) lowerCamelCase_ =[] lowerCamelCase_ =2 lowerCamelCase_ =int(math.sqrt(__snake_case ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(__snake_case ) # Set multiples of start be False for i in range(start * start , num + 1 , __snake_case ): if sieve[i] is True: lowerCamelCase_ =False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(__snake_case ) return prime if __name__ == "__main__": print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
6
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : str =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] ) class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : Any =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] )
6
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ : Any = logging.get_logger(__name__) a_ : Tuple = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Any ='mobilenet_v2' def __init__( self, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=1.0, lowerCAmelCase=8, lowerCAmelCase=8, lowerCAmelCase=6, lowerCAmelCase=32, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase="relu6", lowerCAmelCase=True, lowerCAmelCase=0.8, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_0_1, lowerCAmelCase=255, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) lowerCamelCase_ =num_channels lowerCamelCase_ =image_size lowerCamelCase_ =depth_multiplier lowerCamelCase_ =depth_divisible_by lowerCamelCase_ =min_depth lowerCamelCase_ =expand_ratio lowerCamelCase_ =output_stride lowerCamelCase_ =first_layer_is_expansion lowerCamelCase_ =finegrained_output lowerCamelCase_ =hidden_act lowerCamelCase_ =tf_padding lowerCamelCase_ =classifier_dropout_prob lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =semantic_loss_ignore_index class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Tuple =version.parse('1.11' ) @property def lowercase__ ( self ): """simple docstring""" return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def lowercase__ ( self ): """simple docstring""" if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def lowercase__ ( self ): """simple docstring""" return 1e-4
6
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] =['image_processor', 'tokenizer'] lowercase : Optional[int] ='AutoImageProcessor' lowercase : List[str] ='AutoTokenizer' def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =self.image_processor lowerCamelCase_ =False def __call__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase ) if len(lowerCAmelCase ) > 0: lowerCamelCase_ =args[0] lowerCamelCase_ =args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase ) if text is not None: lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: lowerCamelCase_ =encodings['''input_ids'''] return inputs def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @contextmanager def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) lowerCamelCase_ =True lowerCamelCase_ =self.tokenizer yield lowerCamelCase_ =self.image_processor lowerCamelCase_ =False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ): """simple docstring""" if added_vocab is None: lowerCamelCase_ =self.tokenizer.get_added_vocab() lowerCamelCase_ ={} while tokens: lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE ) if start_token is None: break lowerCamelCase_ =start_token.group(1 ) lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE ) lowerCamelCase_ =start_token.group() if end_token is None: lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' ) else: lowerCamelCase_ =end_token.group() lowerCamelCase_ =re.escape(lowerCAmelCase ) lowerCamelCase_ =re.escape(lowerCAmelCase ) lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE ) if content is not None: lowerCamelCase_ =content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase ) if value: if len(lowerCAmelCase ) == 1: lowerCamelCase_ =value[0] lowerCamelCase_ =value else: # leaf nodes lowerCamelCase_ =[] for leaf in content.split(R'''<sep/>''' ): lowerCamelCase_ =leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": lowerCamelCase_ =leaf[1:-2] # for categorical special tokens output[key].append(lowerCAmelCase ) if len(output[key] ) == 1: lowerCamelCase_ =output[key][0] lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase ) if len(lowerCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
6
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging a_ : str = logging.get_logger(__name__) a_ : Optional[Any] = { """Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""", # See all Marian models at https://huggingface.co/models?filter=marian } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] ='marian' lowercase : Union[str, Any] =['past_key_values'] lowercase : Union[str, Any] ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self, lowerCAmelCase=58_101, lowerCAmelCase=None, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=58_100, lowerCAmelCase=False, lowerCAmelCase=58_100, lowerCAmelCase=0, lowerCAmelCase=0, lowerCAmelCase=True, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =vocab_size lowerCamelCase_ =decoder_vocab_size or vocab_size lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =d_model lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =decoder_layers lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =use_cache lowerCamelCase_ =encoder_layers lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase_ =share_encoder_decoder_embeddings super().__init__( pad_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, ) class __UpperCamelCase ( lowerCamelCase__ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def lowercase__ ( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ =OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCamelCase_ ={0: '''batch'''} lowerCamelCase_ ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCamelCase_ ={0: '''batch''', 1: '''decoder_sequence'''} lowerCamelCase_ ={0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase, direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
lowerCamelCase_ =OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCamelCase_, lowerCamelCase_ =self.num_layers for i in range(lowerCAmelCase ): lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''} lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCamelCase_ =OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def lowercase__ ( self ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ =super().outputs else: lowerCamelCase_ =super(lowerCAmelCase, self ).outputs if self.use_past: lowerCamelCase_, lowerCamelCase_ =self.num_layers for i in range(lowerCAmelCase ): lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''} lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ): """simple docstring""" lowerCamelCase_ =self._generate_dummy_inputs_for_encoder_and_decoder( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) # Generate decoder inputs lowerCamelCase_ =seq_length if not self.use_past else 1 lowerCamelCase_ =self._generate_dummy_inputs_for_encoder_and_decoder( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ ={f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} lowerCamelCase_ =dict(**lowerCAmelCase, **lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCamelCase_, lowerCamelCase_ =common_inputs['''input_ids'''].shape lowerCamelCase_ =common_inputs['''decoder_input_ids'''].shape[1] lowerCamelCase_, lowerCamelCase_ =self.num_attention_heads lowerCamelCase_ =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase_ =decoder_seq_length + 3 lowerCamelCase_ =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCamelCase_ =torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(lowerCAmelCase, lowerCAmelCase )], dim=1 ) lowerCamelCase_ =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCamelCase_, lowerCamelCase_ =self.num_layers lowerCamelCase_ =min(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =max(lowerCAmelCase, lowerCAmelCase ) - min_num_layers lowerCamelCase_ ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(lowerCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase ), ) ) # TODO: test this. 
lowerCamelCase_ =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(lowerCAmelCase, lowerCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) ) return common_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ): """simple docstring""" lowerCamelCase_ =self._generate_dummy_inputs_for_encoder_and_decoder( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCamelCase_, lowerCamelCase_ =common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCamelCase_ =seqlen + 2 lowerCamelCase_, lowerCamelCase_ =self.num_layers lowerCamelCase_, lowerCamelCase_ =self.num_attention_heads lowerCamelCase_ =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCamelCase_ =common_inputs['''attention_mask'''].dtype lowerCamelCase_ =torch.cat( [common_inputs['''attention_mask'''], torch.ones(lowerCAmelCase, lowerCAmelCase, dtype=lowerCAmelCase )], dim=1 ) lowerCamelCase_ =[ (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase ) ] return common_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ): """simple docstring""" lowerCamelCase_ =compute_effective_axis_dimension( lowerCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCamelCase_ =tokenizer.num_special_tokens_to_add(lowerCAmelCase ) lowerCamelCase_ =compute_effective_axis_dimension( lowerCAmelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCamelCase_ =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCamelCase_ =dict(tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase ) ) return common_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ =self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase ) else: lowerCamelCase_ =self._generate_dummy_inputs_for_causal_lm( lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase ) return common_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if self.task in ["default", "seq2seq-lm"]: lowerCamelCase_ =super()._flatten_past_key_values_(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) else: lowerCamelCase_ =super(lowerCAmelCase, self )._flatten_past_key_values_( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return 1e-4
6
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =ShapEImgaImgPipeline lowercase : Dict =['image'] lowercase : str =['image'] lowercase : int =[ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] lowercase : int =False @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return 32 @property def lowercase__ ( self ): """simple docstring""" return self.time_input_dim * 4 @property def lowercase__ ( self ): """simple docstring""" return 8 @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, ) lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =CLIPImageProcessor( crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, ) return image_processor @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowerCamelCase_ =PriorTransformer(**lowerCAmelCase ) return model @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowerCamelCase_ =ShapERenderer(**lowerCAmelCase ) return model def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.dummy_prior lowerCamelCase_ =self.dummy_image_encoder lowerCamelCase_ =self.dummy_image_processor lowerCamelCase_ =self.dummy_renderer lowerCamelCase_ =HeunDiscreteScheduler( beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, ) lowerCamelCase_ ={ '''prior''': prior, '''image_encoder''': image_encoder, 
'''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) ) lowerCamelCase_ =output.images[0] lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowerCamelCase_ =np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase__ ( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =torch_device == '''cpu''' lowerCamelCase_ =True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =1 lowerCamelCase_ =2 lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) for key in inputs.keys(): if key in self.batch_params: lowerCamelCase_ =batch_size * [inputs[key]] lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowerCamelCase_ =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowerCamelCase_ =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) lowerCamelCase_ =pipe( lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
6
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor a_ : List[Any] = logging.get_logger(__name__) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" warnings.warn( '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use PoolFormerImageProcessor instead.''', lowerCAmelCase, ) super().__init__(*lowerCAmelCase, **lowerCAmelCase )
6
'''simple docstring''' from itertools import product def a_ ( __snake_case : int , __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =sides_number lowerCamelCase_ =max_face_number * dice_number lowerCamelCase_ =[0] * (max_total + 1) lowerCamelCase_ =1 lowerCamelCase_ =range(__snake_case , max_face_number + 1 ) for dice_numbers in product(__snake_case , repeat=__snake_case ): lowerCamelCase_ =sum(__snake_case ) totals_frequencies[total] += 1 return totals_frequencies def a_ ( ) -> float: """simple docstring""" lowerCamelCase_ =total_frequency_distribution( sides_number=4 , dice_number=9 ) lowerCamelCase_ =total_frequency_distribution( sides_number=6 , dice_number=6 ) lowerCamelCase_ =0 lowerCamelCase_ =9 lowerCamelCase_ =4 * 9 lowerCamelCase_ =6 for peter_total in range(__snake_case , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) lowerCamelCase_ =(4**9) * (6**6) lowerCamelCase_ =peter_wins_count / total_games_number lowerCamelCase_ =round(__snake_case , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
6
1
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json a_ : str = """sshleifer/mar_enro_6_3_student""" class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''', extract_compressed_file=lowerCAmelCase, ) lowerCamelCase_ =f'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k''' @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" MarianMTModel.from_pretrained(lowerCAmelCase ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={ '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script lowerCamelCase_ =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() lowerCamelCase_ =bash_script.replace('''\\\n''', '''''' ).strip().replace('''"$@"''', '''''' ) for k, v in env_vars_to_replace.items(): lowerCamelCase_ =bash_script.replace(lowerCAmelCase, str(lowerCAmelCase ) ) lowerCamelCase_ =self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") lowerCamelCase_ =f''' --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 '''.split() # XXX: args.gpus > 1 : handle multi_gpu in the future lowerCamelCase_ =['''finetune.py'''] + bash_script.split() + args with patch.object(lowerCAmelCase, '''argv''', lowerCAmelCase ): lowerCamelCase_ =argparse.ArgumentParser() lowerCamelCase_ =pl.Trainer.add_argparse_args(lowerCAmelCase ) lowerCamelCase_ =SummarizationModule.add_model_specific_args(lowerCAmelCase, os.getcwd() ) lowerCamelCase_ =parser.parse_args() lowerCamelCase_ =main(lowerCAmelCase ) # Check metrics lowerCamelCase_ =load_json(model.metrics_save_path ) lowerCamelCase_ =metrics['''val'''][0] lowerCamelCase_ =metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ), (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''], lowerCAmelCase ) self.assertGreater(last_step_stats['''val_avg_gen_time'''], 0.0_1 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''], 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''], 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''], 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ), 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =[x for x in contents if x.endswith('''.ckpt''' )][0] lowerCamelCase_ =os.path.join(args.output_dir, lowerCAmelCase ) lowerCamelCase_ =torch.load(lowerCAmelCase, map_location='''cpu''' ) lowerCamelCase_ ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: lowerCamelCase_ ={os.path.basename(lowerCAmelCase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class __UpperCamelCase ( lowerCamelCase__ ): @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =f'''{self.test_file_dir_str}/test_data/wmt_en_ro''' lowerCamelCase_ ={ '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 128, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script lowerCamelCase_ =( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) lowerCamelCase_ =bash_script.replace('''\\\n''', '''''' ).strip().replace('''"$@"''', '''''' ) lowerCamelCase_ =bash_script.replace('''--fp16 ''', ''' ''' ) for k, v in env_vars_to_replace.items(): lowerCamelCase_ =bash_script.replace(lowerCAmelCase, str(lowerCAmelCase ) ) lowerCamelCase_ =self.get_auto_remove_tmp_dir() lowerCamelCase_ =bash_script.replace('''--fp16''', '''''' ) lowerCamelCase_ =6 lowerCamelCase_ =( ['''distillation.py'''] + bash_script.split() + [ f'''--output_dir={output_dir}''', '''--gpus=1''', '''--learning_rate=1e-3''', f'''--num_train_epochs={epochs}''', '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(lowerCAmelCase, '''argv''', lowerCAmelCase ): lowerCamelCase_ =argparse.ArgumentParser() lowerCamelCase_ =pl.Trainer.add_argparse_args(lowerCAmelCase ) lowerCamelCase_ =SummarizationDistiller.add_model_specific_args(lowerCAmelCase, os.getcwd() ) lowerCamelCase_ =parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu lowerCamelCase_ =distill_main(lowerCAmelCase ) # Check metrics lowerCamelCase_ =load_json(model.metrics_save_path ) lowerCamelCase_ =metrics['''val'''][0] lowerCamelCase_ =metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.0_1 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f'''val_avg_{model.val_metric}'''], lowerCAmelCase ) # check lightning ckpt can be loaded and has a reasonable statedict lowerCamelCase_ =os.listdir(lowerCAmelCase ) lowerCamelCase_ =[x for x in contents if x.endswith('''.ckpt''' )][0] lowerCamelCase_ =os.path.join(args.output_dir, lowerCAmelCase ) lowerCamelCase_ =torch.load(lowerCAmelCase, map_location='''cpu''' ) lowerCamelCase_ ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: lowerCamelCase_ ={os.path.basename(lowerCAmelCase ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
6
'''simple docstring''' import os from typing import Dict, List, Tuple, TypeVar, Union a_ : Tuple = TypeVar("""T""") a_ : Dict = Union[List[T], Tuple[T, ...]] a_ : int = Union[T, List[T], Dict[str, T]] a_ : Optional[Any] = Union[str, bytes, os.PathLike]
6
1
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[str] =AutoencoderKL lowercase : Optional[int] ='sample' lowercase : Dict =1E-2 @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =4 lowerCamelCase_ =3 lowerCamelCase_ =(32, 32) lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) return {"sample": image} @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={ '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } lowerCamelCase_ =self.dummy_input return init_dict, inputs_dict def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" pass @unittest.skipIf(torch_device == '''mps''', '''Gradient checkpointing skipped on MPS''' ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.prepare_init_args_and_inputs_for_common() lowerCamelCase_ =self.model_class(**lowerCAmelCase ) model.to(lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training lowerCamelCase_ =model(**lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() lowerCamelCase_ =torch.randn_like(lowerCAmelCase ) lowerCamelCase_ =(out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing lowerCamelCase_ =self.model_class(**lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training lowerCamelCase_ =model_a(**lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() lowerCamelCase_ =(out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) lowerCamelCase_ =dict(model.named_parameters() ) lowerCamelCase_ =dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''', output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ), 0 ) model.to(lowerCAmelCase ) lowerCamelCase_ =model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) lowerCamelCase_ =model.to(lowerCAmelCase ) model.eval() if torch_device == "mps": lowerCamelCase_ =torch.manual_seed(0 ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) lowerCamelCase_ =torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), ) lowerCamelCase_ =image.to(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase, sample_posterior=lowerCAmelCase, generator=lowerCAmelCase ).sample lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": lowerCamelCase_ =torch.tensor( [ -4.0_078e-01, -3.8_323e-04, -1.2_681e-01, -1.1_462e-01, 2.0_095e-01, 1.0_893e-01, -8.8_247e-02, -3.0_361e-01, -9.8_644e-03, ] ) elif torch_device == "cpu": lowerCamelCase_ =torch.tensor( [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] ) else: lowerCamelCase_ =torch.tensor( [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] ) self.assertTrue(torch_all_close(lowerCAmelCase, lowerCAmelCase, rtol=1e-2 ) ) @slow class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase ) for s in shape] )}.npy''' def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self, lowerCAmelCase=0, lowerCAmelCase=(4, 3, 512, 512), lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =torch.floataa if fpaa else torch.floataa lowerCamelCase_ =torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCAmelCase, lowerCAmelCase ) ) ).to(lowerCAmelCase ).to(lowerCAmelCase ) return image def lowercase__ ( self, lowerCAmelCase="CompVis/stable-diffusion-v1-4", lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ ='''fp16''' if fpaa else None lowerCamelCase_ =torch.floataa if fpaa else torch.floataa lowerCamelCase_ =AutoencoderKL.from_pretrained( lowerCAmelCase, subfolder='''vae''', torch_dtype=lowerCAmelCase, revision=lowerCAmelCase, ) model.to(lowerCAmelCase ).eval() return model def lowercase__ ( self, lowerCAmelCase=0 ): """simple docstring""" if torch_device == "mps": return torch.manual_seed(lowerCAmelCase ) return torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model() lowerCamelCase_ =self.get_sd_image(lowerCAmelCase ) lowerCamelCase_ =self.get_generator(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase, generator=lowerCAmelCase, sample_posterior=lowerCAmelCase ).sample assert sample.shape == image.shape lowerCamelCase_ =sample[-1, -2:, -2:, :2].flatten().float().cpu() lowerCamelCase_ =torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]], [47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]], # fmt: on ] ) @require_torch_gpu def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model(fpaa=lowerCAmelCase ) lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, fpaa=lowerCAmelCase ) lowerCamelCase_ =self.get_generator(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase, generator=lowerCAmelCase, sample_posterior=lowerCAmelCase ).sample assert sample.shape == image.shape lowerCamelCase_ =sample[-1, -2:, :2, -2:].flatten().float().cpu() lowerCamelCase_ =torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model() lowerCamelCase_ =self.get_sd_image(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).sample assert sample.shape == image.shape lowerCamelCase_ =sample[-1, -2:, -2:, :2].flatten().float().cpu() lowerCamelCase_ =torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]], [37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]], # fmt: on ] ) @require_torch_gpu def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model() lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64) ) with torch.no_grad():
lowerCamelCase_ =model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] lowerCamelCase_ =sample[-1, -2:, :2, -2:].flatten().cpu() lowerCamelCase_ =torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]], [16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]], # fmt: on ] ) @require_torch_gpu def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model(fpaa=lowerCAmelCase ) lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64), fpaa=lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] lowerCamelCase_ =sample[-1, -2:, :2, -2:].flatten().float().cpu() lowerCamelCase_ =torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available(), reason='''xformers is not required when using PyTorch 2.0.''' ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model(fpaa=lowerCAmelCase ) lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64), fpaa=lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model.decode(lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): lowerCamelCase_ =model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available(), reason='''xformers is not required when using PyTorch 2.0.''' ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model() lowerCamelCase_ =self.get_sd_image(lowerCAmelCase, shape=(3, 4, 64, 64) ) with torch.no_grad(): lowerCamelCase_ =model.decode(lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): lowerCamelCase_ =model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]], [47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]], # fmt: on ] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.get_sd_vae_model() lowerCamelCase_ =self.get_sd_image(lowerCAmelCase ) lowerCamelCase_ =self.get_generator(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model.encode(lowerCAmelCase ).latent_dist lowerCamelCase_ =dist.sample(generator=lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] lowerCamelCase_ =sample[0, -1, -3:, -3:].flatten().cpu() lowerCamelCase_ =torch.tensor(lowerCAmelCase ) lowerCamelCase_ =3e-3 if torch_device != '''mps''' else 1e-2 assert torch_all_close(lowerCAmelCase, lowerCAmelCase, atol=lowerCAmelCase )
6
'''simple docstring''' import math import random from typing import Any from .hill_climbing import SearchProblem def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any: """simple docstring""" lowerCamelCase_ =False lowerCamelCase_ =search_prob lowerCamelCase_ =start_temperate lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =None while not search_end: lowerCamelCase_ =current_state.score() if best_state is None or current_score > best_state.score(): lowerCamelCase_ =current_state scores.append(__snake_case ) iterations += 1 lowerCamelCase_ =None lowerCamelCase_ =current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor lowerCamelCase_ =neighbors.pop(__snake_case ) lowerCamelCase_ =picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: lowerCamelCase_ =change * -1 # in case we are finding minimum if change > 0: # improves the solution lowerCamelCase_ =picked_neighbor else: lowerCamelCase_ =(math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability lowerCamelCase_ =picked_neighbor lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor lowerCamelCase_ =True else: lowerCamelCase_ =next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(__snake_case ) , __snake_case ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str: """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) a_ : Optional[int] = simulated_annealing( prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) a_ : List[str] = simulated_annealing( prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}""" ) def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return (3 * x**2) - (6 * y) a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True) print( """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """ F"""{local_min.score()}""" ) a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True) print( """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """ F"""{local_min.score()}""" )
6
1
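Note: the simulated-annealing record above wraps its loop in a SearchProblem class, but the core accept/cool logic reduces to a few lines. This is a sketch under the same schedule (geometric cooling, Boltzmann acceptance) minimising a 1-D function; all names here are mine, not the record's.

import math
import random

def anneal(f, x: float, step: float = 0.5, temp: float = 100.0,
           rate: float = 0.01, threshold: float = 1.0) -> float:
    best = x
    while temp > threshold:
        candidate = x + random.uniform(-step, step)
        change = f(candidate) - f(x)              # > 0 means worse when minimising
        if change < 0 or random.random() < math.e ** (-change / temp):
            x = candidate                         # accept improvements; accept worsenings with prob e^(-dE/T)
        best = min(best, x, key=f)
        temp -= temp * rate                       # geometric cooling, as in the record
    return best

print(anneal(lambda v: (v - 3) ** 2, x=12.0))     # typically lands near 3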
'''simple docstring''' import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def a_ ( __snake_case : List[str] ) -> Dict: """simple docstring""" monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' , set() ) @pytest.fixture def a_ ( __snake_case : Dict ) -> Tuple: """simple docstring""" class __UpperCamelCase : def __init__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =metric_id class __UpperCamelCase : lowercase : Union[str, Any] =[MetricMock(lowerCamelCase__ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']] def lowercase__ ( self ): """simple docstring""" return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''' , HfhMock() ) @pytest.mark.parametrize( '''func, args''' , [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def a_ ( __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> Union[str, Any]: """simple docstring""" if "tmp_path" in args: lowerCamelCase_ =tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(__snake_case , match='''https://huggingface.co/docs/evaluate''' ): func(*__snake_case )
6
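Note: the test record above stubs out `datasets.inspect.huggingface_hub` with a fake object via `monkeypatch.setattr`. The underlying pytest pattern, reduced to a self-contained example with no datasets dependency (all names illustrative).

import types

hub = types.SimpleNamespace(list_metrics=lambda: ["exact_match"])

def test_stubbed_hub(monkeypatch):        # run with `pytest`; monkeypatch is a built-in fixture
    monkeypatch.setattr(hub, "list_metrics", lambda: ["accuracy", "mse"])
    assert hub.list_metrics() == ["accuracy", "mse"]
    # the original attribute is restored automatically when the test ends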
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( __snake_case : Optional[int] ) -> List[str]: """simple docstring""" lowerCamelCase_ =[ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def a_ ( __snake_case : List[Any] ) -> int: """simple docstring""" lowerCamelCase_, lowerCamelCase_ =emb.weight.shape lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case ) lowerCamelCase_ =emb.weight.data return lin_layer def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict: """simple docstring""" lowerCamelCase_ ={} for old_key in state_dict.keys(): lowerCamelCase_ =old_key if "moe_layer.experts." in key: if expert_idx is not None: lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' ) else: lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' ) if "gate" in key: lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' ) if "fc2" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' ) if "fc1" and "experts" not in key: lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' ) if ".encoder_attn." in key: lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' ) if "encoder_attn_layer_norm" in key: lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' ) if "final_layer_norm" in key: lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' ) lowerCamelCase_ =state_dict[old_key] return new_dict def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict: """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =0 os.makedirs(__snake_case , exist_ok=__snake_case ) for expert in range(__snake_case ): lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(__snake_case ): lowerCamelCase_ =torch.load(__snake_case )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =os.path.join( __snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) torch.save(__snake_case , __snake_case ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(__snake_case )[0]].dtype ) # Add the last block lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) ) lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model'''] remove_ignore_keys_(__snake_case ) lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case ) lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight'''] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(__snake_case ) == 1: 
lowerCamelCase_ =os.path.join(__snake_case , __snake_case ) torch.save(__snake_case , __snake_case ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(__snake_case , __snake_case ) # Otherwise, let's build the index lowerCamelCase_ ={} for idx, shard in enumerate(__snake_case ): lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' ) lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) ) for key in shard: lowerCamelCase_ =shard_file # Add the metadata lowerCamelCase_ ={'''total_size''': total_size} lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f: lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n''' f.write(__snake_case ) return metadata, index if __name__ == "__main__": a_ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a_ : Tuple = parser.parse_args() a_ , a_ : int = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_28, args.dtype, ) a_ : Tuple = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
6
1
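Note: the conversion script in the record above writes one `.bin` shard per expert plus a shared block, then builds a `weight_map` index with a `total_size` metadata entry. The index format it produces, reduced to a self-contained sketch; file and tensor names below are made up.

import json

shards = {
    "pytorch_model-00001-of-00002.bin": ["encoder.layers.0.ffn.fc1.weight"],
    "pytorch_model-00002-of-00002.bin": ["decoder.embed_tokens.weight"],
}
weight_map = {param: shard for shard, params in shards.items() for param in params}
index = {"metadata": {"total_size": 123_456}, "weight_map": weight_map}
print(json.dumps(index, indent=2, sort_keys=True))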
'''simple docstring''' from sklearn.metrics import recall_score import datasets a_ : List[Any] = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ a_ : Any = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. 
>>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ a_ : Optional[Any] = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ), reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''], ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=1, lowerCAmelCase="binary", lowerCAmelCase=None, lowerCAmelCase="warn", ): """simple docstring""" lowerCamelCase_ =recall_score( lowerCAmelCase, lowerCAmelCase, labels=lowerCAmelCase, pos_label=lowerCAmelCase, average=lowerCAmelCase, sample_weight=lowerCAmelCase, zero_division=lowerCAmelCase, ) return {"recall": float(lowerCAmelCase ) if score.size == 1 else score}
6
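Note: the metric wrapper above delegates entirely to `sklearn.metrics.recall_score`; the binary case from its own docstring can be checked directly (assumes scikit-learn is installed).

from sklearn.metrics import recall_score

references = [0, 0, 1, 1, 1]
predictions = [0, 1, 0, 1, 1]
# 2 of the 3 positive references are predicted positive: TP / (TP + FN) = 2 / 3
print(recall_score(references, predictions))   # 0.6666666666666666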
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int =['image_processor', 'tokenizer'] lowercase : int ='LayoutLMv2ImageProcessor' lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''', lowerCAmelCase, ) lowerCamelCase_ =kwargs.pop('''feature_extractor''' ) lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase, lowerCAmelCase ) def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension) lowerCamelCase_ =features['''words'''] lowerCamelCase_ =self.tokenizer( text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, ) # add pixel values lowerCamelCase_ =features.pop('''pixel_values''' ) if return_overflowing_tokens is True: lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] ) 
lowerCamelCase_ =images return encoded_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCAmelCase ) != len(lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' ) return images_with_overflow def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, ) return self.image_processor_class @property def lowercase__ ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, ) return self.image_processor
6
1
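Note: the `get_overflowing_images` helper in the processor record above duplicates each image once per truncation chunk using `overflow_to_sample_mapping`; the indexing it performs is just this.

images = ["img0", "img1"]
overflow_to_sample_mapping = [0, 0, 1]            # sample 0 was split into two chunks
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
print(images_with_overflow)                       # ['img0', 'img0', 'img1']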
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[torch.FloatTensor] =None lowercase : torch.FloatTensor =None lowercase : Optional[Tuple[torch.FloatTensor]] =None lowercase : Optional[Tuple[torch.FloatTensor]] =None class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=512, lowerCAmelCase="cls", lowerCAmelCase=False, lowerCAmelCase=True, **lowerCAmelCase, ): """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =project_dim lowerCamelCase_ =pooler_fn lowerCamelCase_ =learn_encoder lowerCamelCase_ =use_attention_mask class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int =[r'pooler', r'logit_scale'] lowercase : Union[str, Any] =[r'position_ids', r'predictions.decoder.bias'] lowercase : Any ='roberta' lowercase : Optional[Any] =RobertaSeriesConfig def __init__( self, lowerCAmelCase ): """simple docstring""" super().__init__(lowerCAmelCase ) lowerCamelCase_ =XLMRobertaModel(lowerCAmelCase ) lowerCamelCase_ =nn.Linear(config.hidden_size, config.project_dim ) lowerCamelCase_ =getattr(lowerCAmelCase, '''has_pre_transformation''', lowerCAmelCase ) if self.has_pre_transformation: lowerCamelCase_ =nn.Linear(config.hidden_size, config.project_dim ) lowerCamelCase_ =nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps ) self.post_init() def lowercase__ ( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, ): """simple docstring""" lowerCamelCase_ =return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase_ =self.base_model( input_ids=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, position_ids=lowerCAmelCase, head_mask=lowerCAmelCase, inputs_embeds=lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=lowerCAmelCase, output_attentions=lowerCAmelCase, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=lowerCAmelCase, ) if self.has_pre_transformation: lowerCamelCase_ =outputs['''hidden_states'''][-2] lowerCamelCase_ =self.pre_LN(lowerCAmelCase ) lowerCamelCase_ =self.transformation_pre(lowerCAmelCase ) return TransformationModelOutput( projection_state=lowerCAmelCase, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: lowerCamelCase_ =self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=lowerCAmelCase, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
6
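Note: the RobertaSeries record above adds a small projection head: optionally LayerNorm the second-to-last hidden state, then project it from `hidden_size` to `project_dim`. A shape-level sketch of that head in isolation; dimensions are illustrative.

import torch
from torch import nn

hidden_size, project_dim = 768, 512
pre_ln = nn.LayerNorm(hidden_size)
projection = nn.Linear(hidden_size, project_dim)

hidden_state = torch.randn(2, 16, hidden_size)    # [batch, seq_len, hidden_size]
projection_state = projection(pre_ln(hidden_state))
print(projection_state.shape)                     # torch.Size([2, 16, 512])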
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =VQModel lowercase : Union[str, Any] ='sample' @property def lowercase__ ( self, lowerCAmelCase=(32, 32) ): """simple docstring""" lowerCamelCase_ =4 lowerCamelCase_ =3 lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) return {"sample": image} @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) @property def lowercase__ ( self ): """simple docstring""" return (3, 32, 32) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={ '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 3, } lowerCamelCase_ =self.dummy_input return init_dict, inputs_dict def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info['''missing_keys'''] ), 0 ) model.to(lowerCAmelCase ) lowerCamelCase_ =model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(lowerCAmelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size ) lowerCamelCase_ =image.to(lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase ).sample lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu() # fmt: off lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
6
1
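Note: the VQModel integration test above pins every RNG before sampling so its hard-coded output slice stays reproducible. The seeding recipe it uses, shown in isolation.

import torch

torch.manual_seed(0)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(0)

a = torch.randn(3)
torch.manual_seed(0)
b = torch.randn(3)
assert torch.equal(a, b)       # identical draws after re-seeding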
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Any = logging.get_logger(__name__) a_ : Optional[int] = { """Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""", """Salesforce/blip-vqa-capfit-large""": ( """https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json""" ), """Salesforce/blip-image-captioning-base""": ( """https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json""" ), """Salesforce/blip-image-captioning-large""": ( """https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json""" ), """Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""", """Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""", """Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""", """Salesforce/blip-itm-large-flikr""": ( """https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='blip_text_model' def __init__( self, lowerCAmelCase=30_524, lowerCAmelCase=768, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=8, lowerCAmelCase=512, lowerCAmelCase="gelu", lowerCAmelCase=1e-12, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=30_522, lowerCAmelCase=2, lowerCAmelCase=0, lowerCAmelCase=102, lowerCAmelCase=True, lowerCAmelCase=True, **lowerCAmelCase, ): """simple docstring""" super().__init__( pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, sep_token_id=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =encoder_hidden_size lowerCamelCase_ =intermediate_size lowerCamelCase_ =projection_dim lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =hidden_act lowerCamelCase_ =initializer_range lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =is_decoder lowerCamelCase_ =use_cache @classmethod def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": lowerCamelCase_ =config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase, **lowerCAmelCase ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] ='blip_vision_model' def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=384, lowerCAmelCase=16, lowerCAmelCase="gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=1e-10, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_size lowerCamelCase_ =intermediate_size lowerCamelCase_ =projection_dim lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =patch_size lowerCamelCase_ =image_size lowerCamelCase_ =initializer_range lowerCamelCase_ =attention_dropout lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =hidden_act @classmethod def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": lowerCamelCase_ =config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase, **lowerCAmelCase ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] ='blip' lowercase : Tuple =True def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=512, lowerCAmelCase=2.6_5_9_2, lowerCAmelCase=256, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) if text_config is None: lowerCamelCase_ ={} logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' ) if vision_config is None: lowerCamelCase_ ={} logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' ) lowerCamelCase_ =BlipTextConfig(**lowerCAmelCase ) lowerCamelCase_ =BlipVisionConfig(**lowerCAmelCase ) lowerCamelCase_ =self.vision_config.hidden_size lowerCamelCase_ =projection_dim lowerCamelCase_ =logit_scale_init_value lowerCamelCase_ =1.0 lowerCamelCase_ =0.0_2 lowerCamelCase_ =image_text_hidden_size @classmethod def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =copy.deepcopy(self.__dict__ ) lowerCamelCase_ =self.text_config.to_dict() lowerCamelCase_ =self.vision_config.to_dict() lowerCamelCase_ =self.__class__.model_type return output
6
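Note: the Blip configs in the record above compose: the top-level config instantiates text and vision sub-configs from dicts and serialises them back in `to_dict`. The same composition pattern, sketched with plain dataclasses standing in for `PretrainedConfig`; field values echo the record's defaults.

from dataclasses import dataclass, field, asdict

@dataclass
class TextConfig:
    vocab_size: int = 30_524
    hidden_size: int = 768

@dataclass
class VisionConfig:
    hidden_size: int = 768
    patch_size: int = 16

@dataclass
class BlipLikeConfig:
    text_config: TextConfig = field(default_factory=TextConfig)
    vision_config: VisionConfig = field(default_factory=VisionConfig)
    projection_dim: int = 512

    def to_dict(self) -> dict:
        out = asdict(self)                 # recursively serialises the sub-configs
        out["model_type"] = "blip"
        return out

print(BlipLikeConfig().to_dict())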
'''simple docstring''' import datasets from .evaluate import evaluate a_ : List[Any] = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ a_ : List[Any] = """ This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ a_ : Any = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the CUAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer 'aupr': Area Under the Precision-Recall curve 'prec_at_80_recall': Precision at 80% recall 'prec_at_90_recall': Precision at 90% recall Examples: >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { '''predictions''': { '''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ), }, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={prediction['''id''']: 
prediction['''prediction_text'''] for prediction in predictions} lowerCamelCase_ =[ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase ) return score
6
1
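Note: before scoring, the CUAD wrapper above reshapes its inputs: predictions become an `{id: texts}` dict and references are wrapped in a SQuAD-style nested dataset. The prediction-side transform is just a comprehension; example values are shortened from the metric's own docstring.

predictions = [
    {"id": "Supply Agreement__Parties", "prediction_text": ["The seller:"]},
]
pred_dict = {p["id"]: p["prediction_text"] for p in predictions}
print(pred_dict)   # {'Supply Agreement__Parties': ['The seller:']}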
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ={} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=1 ): """simple docstring""" if self.graph.get(lowerCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: lowerCamelCase_ =[[w, v]] if not self.graph.get(lowerCAmelCase ): lowerCamelCase_ =[] def lowercase__ ( self ): """simple docstring""" return list(self.graph ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if self.graph.get(lowerCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ): """simple docstring""" if s == d: return [] lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) lowerCamelCase_ =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase ) != 0: lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(lowerCAmelCase ) == 0: return visited def lowercase__ ( self, lowerCAmelCase=-1 ): """simple docstring""" if c == -1: lowerCamelCase_ =floor(random() * 10_000 ) + 10 for i in range(lowerCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): lowerCamelCase_ =floor(random() * c ) + 1 if n != i: self.add_pair(lowerCAmelCase, lowerCAmelCase, 1 ) def lowercase__ ( self, lowerCAmelCase=-2 ): """simple docstring""" lowerCamelCase_ =deque() lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] d.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) while d: lowerCamelCase_ =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return len(self.graph[u] ) def lowercase__ ( self, lowerCAmelCase=-2 ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) lowerCamelCase_ =s lowerCamelCase_ =[] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCAmelCase ) != 0: lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(lowerCAmelCase ) == 0: return sorted_nodes def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] 
stack.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(lowerCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(lowerCAmelCase ) != 0: lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1] else: lowerCamelCase_ =False indirect_parents.append(lowerCAmelCase ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(lowerCAmelCase ) == 0: return list(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(lowerCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(lowerCAmelCase ) != 0: lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1] else: lowerCamelCase_ =False indirect_parents.append(lowerCAmelCase ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(lowerCAmelCase ) == 0: return False def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ): """simple docstring""" lowerCamelCase_ =time() self.dfs(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =time() return end - begin def lowercase__ ( self, lowerCAmelCase=-2 ): """simple docstring""" lowerCamelCase_ =time() self.bfs(lowerCAmelCase ) lowerCamelCase_ =time() return end - begin class __UpperCamelCase : def __init__( self ): """simple docstring""" lowerCamelCase_ ={} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=1 ): """simple docstring""" if self.graph.get(lowerCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist lowerCamelCase_ =[[w, v]] # add the other way if self.graph.get(lowerCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist lowerCamelCase_ =[[w, u]] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if self.graph.get(lowerCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase ) # the other way round if self.graph.get(lowerCAmelCase ): for _ in self.graph[v]: if _[1] == u: 
self.graph[v].remove(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ): """simple docstring""" if s == d: return [] lowerCamelCase_ =[] lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] stack.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) lowerCamelCase_ =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase ) != 0: lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1] else: lowerCamelCase_ =ss # check if se have reached the starting point if len(lowerCAmelCase ) == 0: return visited def lowercase__ ( self, lowerCAmelCase=-1 ): """simple docstring""" if c == -1: lowerCamelCase_ =floor(random() * 10_000 ) + 10 for i in range(lowerCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): lowerCamelCase_ =floor(random() * c ) + 1 if n != i: self.add_pair(lowerCAmelCase, lowerCAmelCase, 1 ) def lowercase__ ( self, lowerCAmelCase=-2 ): """simple docstring""" lowerCamelCase_ =deque() lowerCamelCase_ =[] if s == -2: lowerCamelCase_ =list(self.graph )[0] d.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) while d: lowerCamelCase_ =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return len(self.graph[u] ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(lowerCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(lowerCAmelCase ) != 0: lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1] else: lowerCamelCase_ =False indirect_parents.append(lowerCAmelCase ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(lowerCAmelCase ) == 0: return list(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ =list(self.graph )[0] stack.append(lowerCAmelCase ) visited.append(lowerCAmelCase ) lowerCamelCase_ =-2 lowerCamelCase_ =[] lowerCamelCase_ =s lowerCamelCase_ =False lowerCamelCase_ =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: lowerCamelCase_ =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): lowerCamelCase_ =len(lowerCAmelCase ) - 1 
while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) lowerCamelCase_ =node[1] break # check if all the children are visited if s == ss: stack.pop() lowerCamelCase_ =True if len(lowerCAmelCase ) != 0: lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1] else: lowerCamelCase_ =False indirect_parents.append(lowerCAmelCase ) lowerCamelCase_ =s lowerCamelCase_ =ss # check if se have reached the starting point if len(lowerCAmelCase ) == 0: return False def lowercase__ ( self ): """simple docstring""" return list(self.graph ) def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ): """simple docstring""" lowerCamelCase_ =time() self.dfs(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =time() return end - begin def lowercase__ ( self, lowerCAmelCase=-2 ): """simple docstring""" lowerCamelCase_ =time() self.bfs(lowerCAmelCase ) lowerCamelCase_ =time() return end - begin
6
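Note: the Graph classes in the record above detect cycles with an iterative DFS that tests for back edges via `anticipating_nodes`. The same idea is easier to see written recursively with white/gray/black colouring over the record's `{vertex: [[weight, vertex], ...]}` adjacency format; this is a rewrite of mine, not the record's own code.

def has_cycle(graph: dict) -> bool:
    WHITE, GRAY, BLACK = 0, 1, 2                   # unvisited / on current path / done
    color = {v: WHITE for v in graph}

    def dfs(v) -> bool:
        color[v] = GRAY
        for _weight, nxt in graph.get(v, []):      # edges stored as [weight, vertex]
            state = color.get(nxt, WHITE)
            if state == GRAY:                      # back edge to the current path
                return True
            if state == WHITE and dfs(nxt):
                return True
        color[v] = BLACK
        return False

    return any(color[v] == WHITE and dfs(v) for v in graph)

print(has_cycle({0: [[1, 1]], 1: [[1, 2]], 2: [[1, 0]]}))   # True: 0 -> 1 -> 2 -> 0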
'''simple docstring''' import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer a_ : Tuple = logging.get_logger(__name__) a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} a_ : Tuple = { """vocab_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : Union[str, Any] = { """vocab_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : str = { """vocab_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json""" ), }, } a_ : int = { """facebook/dpr-ctx_encoder-single-nq-base""": 5_12, """facebook/dpr-ctx_encoder-multiset-base""": 5_12, } a_ : List[Any] = { """facebook/dpr-question_encoder-single-nq-base""": 5_12, """facebook/dpr-question_encoder-multiset-base""": 5_12, } a_ : Optional[Any] = { """facebook/dpr-reader-single-nq-base""": 5_12, """facebook/dpr-reader-multiset-base""": 5_12, } a_ : Optional[int] = { """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : List[str] = { """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True}, } a_ : Dict = { """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True}, """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[Any] =VOCAB_FILES_NAMES lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] 
=CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : Dict =DPRContextEncoderTokenizer class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase : List[Any] =DPRQuestionEncoderTokenizer a_ : Union[str, Any] = collections.namedtuple( """DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""] ) a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""]) a_ : Dict = R""" Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)` with the format: [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids> Args: questions (`str` or `List[str]`): The questions to be encoded. You can specify one question for many passages. In this case, the question will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in `titles` or `texts`. titles (`str` or `List[str]`): The passages titles to be encoded. This can be a string or a list of strings if there are several passages. texts (`str` or `List[str]`): The passages texts to be encoded. This can be a string or a list of strings if there are several passages. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_attention_mask (`bool`, *optional*): Whether or not to return the attention mask. If not set, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) Return: `Dict[str, List[List[int]]]`: A dictionary with the following keys: - `input_ids`: List of token ids to be fed to a model. - `attention_mask`: List of indices specifying which tokens should be attended to by the model. """ @add_start_docstrings(lowerCamelCase__ ) class __UpperCamelCase : def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" if titles is None and texts is None: return super().__call__( lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) elif titles is None or texts is None: lowerCamelCase_ =titles if texts is None else texts return super().__call__( lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, ) lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles] lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages assert len(lowerCAmelCase ) == len( lowerCAmelCase ), f'''There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.''' lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids'''] lowerCamelCase_ ={ '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase ) ] } if return_attention_mask is not False: lowerCamelCase_ =[] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowerCamelCase_ =attention_mask return self.pad(lowerCAmelCase, 
padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ): """simple docstring""" lowerCamelCase_ =reader_input['''input_ids'''] lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3] lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ ) lowerCamelCase_ =[] for doc_id in sorted_docs: lowerCamelCase_ =list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowerCamelCase_ =sequence_ids.index(self.pad_token_id ) else: lowerCamelCase_ =len(lowerCAmelCase ) lowerCamelCase_ =self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) ) if len(lowerCAmelCase ) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =[] for start_index, start_score in enumerate(lowerCAmelCase ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase ) lowerCamelCase_ =[] for (start_index, end_index), score in scores: assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]''' lowerCamelCase_ =end_index - start_index + 1 assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}''' if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(lowerCAmelCase ) == top_spans: break return chosen_span_intervals @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : int =VOCAB_FILES_NAMES lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION lowercase : int =['input_ids', 'attention_mask'] lowercase : Dict =DPRReaderTokenizer
6
1
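The `_get_best_spans` method in the reader tokenizer above is a small greedy algorithm: score every candidate answer span up to `max_answer_length`, sort by score, and keep the top spans that do not contain (and are not contained by) an already chosen span. A minimal, self-contained sketch of that selection logic follows; the function and variable names are illustrative only, not part of the transformers API.

def best_spans(start_logits, end_logits, max_answer_length=4, top_spans=2):
    # Score every candidate span (start, end) whose length is at most max_answer_length.
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    # Keep the highest-scoring spans, skipping any span that contains or is
    # contained by a span that was already chosen.
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


# The best span is (1, 2); the runner-up must not be nested with it.
assert best_spans([0.1, 0.9, 0.2, 0.4], [0.1, 0.2, 0.8, 0.3]) == [(1, 2), (3, 3)]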
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 32, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], lowerCAmelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], lowerCAmelCase = True, lowerCAmelCase=7, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=3, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =do_resize lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 288} lowerCamelCase_ =size_divisor lowerCamelCase_ =do_rescale lowerCamelCase_ =rescale_factor lowerCamelCase_ =do_normalize lowerCamelCase_ =do_center_crop lowerCamelCase_ =image_mean lowerCamelCase_ =image_std lowerCamelCase_ =do_pad lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =min_resolution lowerCamelCase_ =max_resolution def lowercase__ ( self ): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" if not batched: lowerCamelCase_ =self.size['''shortest_edge'''] lowerCamelCase_ =image_inputs[0] if isinstance(lowerCAmelCase, Image.Image ): lowerCamelCase_, lowerCamelCase_ =image.size else: lowerCamelCase_, lowerCamelCase_ =image.shape[1], image.shape[2] lowerCamelCase_ =size / min(lowerCAmelCase, lowerCAmelCase ) if h < w: lowerCamelCase_, lowerCamelCase_ =size, scale * w else: lowerCamelCase_, lowerCamelCase_ =scale * h, size lowerCamelCase_ =int((1_333 / 800) * size ) if max(lowerCAmelCase, lowerCAmelCase ) > max_size: lowerCamelCase_ =max_size / max(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =newh * scale lowerCamelCase_ =neww * scale lowerCamelCase_, lowerCamelCase_ =int(newh + 0.5 ), int(neww + 0.5 ) lowerCamelCase_, lowerCamelCase_ =( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: lowerCamelCase_ =[] for image in image_inputs: lowerCamelCase_, lowerCamelCase_ =self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[0] )[0] lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Optional[Any] =BridgeTowerImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =BridgeTowerImageProcessingTester(self ) @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ 
=self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''size_divisor''' ) ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, Image.Image ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, np.ndarray ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, torch.Tensor ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase ) 
self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
6
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def a_ ( ) -> Tuple: """simple docstring""" lowerCamelCase_ ={ '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } lowerCamelCase_ =Dataset.from_dict(__snake_case ) return dataset class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ), 2 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =get_dataset() lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase ) self.assertEqual(len(lowerCAmelCase ), 2 ) print(lowerCAmelCase ) self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
6
1
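The `get_expected_values` helper in the BridgeTower tester above encodes the processor's resize rule: scale the shorter edge to `size`, cap the longer edge at `int(1333 / 800 * size)`, round to the nearest integer, then snap both edges down to a multiple of `size_divisor`. A standalone sketch of that arithmetic, with an illustrative function name and defaults taken from the tester:

def expected_resize(height, width, size=288, size_divisor=32):
    scale = size / min(height, width)
    if height < width:
        new_h, new_w = size, scale * width
    else:
        new_h, new_w = scale * height, size
    max_size = int((1333 / 800) * size)  # cap on the longer edge
    if max(new_h, new_w) > max_size:
        rescale = max_size / max(new_h, new_w)
        new_h, new_w = new_h * rescale, new_w * rescale
    new_h, new_w = int(new_h + 0.5), int(new_w + 0.5)
    return new_h // size_divisor * size_divisor, new_w // size_divisor * size_divisor


# A 400x600 image: the short edge becomes 288 and both edges snap to multiples of 32.
assert expected_resize(400, 600) == (288, 416)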
'''simple docstring''' import enum import shutil import sys a_ , a_ : List[str] = shutil.get_terminal_size() a_ : Dict = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""} class __UpperCamelCase ( enum.Enum ): lowercase : Optional[int] =0 lowercase : Any =1 def a_ ( __snake_case : Dict , __snake_case : Tuple="" ) -> Dict: """simple docstring""" sys.stdout.write(str(__snake_case ) + end ) sys.stdout.flush() def a_ ( __snake_case : List[Any] , __snake_case : Dict , __snake_case : Dict="" ) -> Optional[Any]: """simple docstring""" forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , __snake_case ) def a_ ( ) -> int: """simple docstring""" forceWrite('''\r''' ) def a_ ( __snake_case : int , __snake_case : str ) -> Tuple: """simple docstring""" forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' ) def a_ ( ) -> Any: """simple docstring""" forceWrite(''' ''' * TERMINAL_WIDTH ) reset_cursor() def a_ ( ) -> int: """simple docstring""" reset_cursor() forceWrite('''-''' * TERMINAL_WIDTH )
6
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) a_ : Any = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
1
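The terminal helpers above follow a common idiom: write without a newline, flush immediately, and use a carriage return plus padding to redraw a single line in place. A self-contained usage sketch of that idiom is below; it re-implements the two relevant helpers locally so the snippet runs on its own, rather than importing from the file above.

import shutil
import sys
import time

WIDTH = shutil.get_terminal_size().columns


def force_write(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def clear_line():
    force_write("\r")               # return the cursor to column 0
    force_write(" " * (WIDTH - 1))  # blank the line without wrapping
    force_write("\r")


# Redraw one status line five times instead of printing five lines.
for step in range(1, 6):
    clear_line()
    force_write(f"step {step}/5")
    time.sleep(0.2)
force_write("\n")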
'''simple docstring'''
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer

a_ : Tuple = logging.get_logger(__name__)

class __UpperCamelCase ( lowerCamelCase__ ):
    lowercase : Any ='AutoTokenizer'
    lowercase : Union[str, Any] =['tokenizer']
    lowercase : List[Any] ={
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__( self, lowerCAmelCase, lowerCAmelCase=None ):
        """simple docstring"""
        super().__init__(lowerCAmelCase )
        lowerCamelCase_ =speaker_embeddings

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase="speaker_embeddings_path.json", **lowerCAmelCase ):
        """simple docstring"""
        if speaker_embeddings_dict_path is not None:
            lowerCamelCase_ =get_file_from_repo(
                lowerCAmelCase,
                lowerCAmelCase,
                subfolder=kwargs.pop('''subfolder''', lowerCAmelCase ),
                cache_dir=kwargs.pop('''cache_dir''', lowerCAmelCase ),
                force_download=kwargs.pop('''force_download''', lowerCAmelCase ),
                proxies=kwargs.pop('''proxies''', lowerCAmelCase ),
                resume_download=kwargs.pop('''resume_download''', lowerCAmelCase ),
                local_files_only=kwargs.pop('''local_files_only''', lowerCAmelCase ),
                use_auth_token=kwargs.pop('''use_auth_token''', lowerCAmelCase ),
                revision=kwargs.pop('''revision''', lowerCAmelCase ), )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(lowerCAmelCase, lowerCAmelCase )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                lowerCamelCase_ =None
            else:
                with open(lowerCAmelCase ) as speaker_embeddings_json:
                    lowerCamelCase_ =json.load(lowerCAmelCase )
        else:
            lowerCamelCase_ =None
        lowerCamelCase_ =AutoTokenizer.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
        return cls(tokenizer=lowerCAmelCase, speaker_embeddings=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase="speaker_embeddings_path.json", lowerCAmelCase="speaker_embeddings", lowerCAmelCase = False, **lowerCAmelCase, ):
        """simple docstring"""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(lowerCAmelCase, lowerCAmelCase, '''v2''' ), exist_ok=lowerCAmelCase )
            lowerCamelCase_ ={}
            lowerCamelCase_ =save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    lowerCamelCase_ =self._load_voice_preset(lowerCAmelCase )
                    lowerCamelCase_ ={}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['''repo_or_path'''], lowerCAmelCase, f'''{prompt_key}_{key}''' ),
                            voice_preset[key],
                            allow_pickle=lowerCAmelCase, )
                        lowerCamelCase_ =os.path.join(lowerCAmelCase, f'''{prompt_key}_{key}.npy''' )
                    lowerCamelCase_ =tmp_dict
            with open(os.path.join(lowerCAmelCase, lowerCAmelCase ), '''w''' ) as fp:
                json.dump(lowerCAmelCase, lowerCAmelCase )
        super().save_pretrained(lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase = None, **lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =self.speaker_embeddings[voice_preset]
        lowerCamelCase_ ={}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            lowerCamelCase_ =get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''', '''/''' ),
                voice_preset_paths[key],
                subfolder=kwargs.pop('''subfolder''', lowerCAmelCase ),
                cache_dir=kwargs.pop('''cache_dir''', lowerCAmelCase ),
                force_download=kwargs.pop('''force_download''', lowerCAmelCase ),
                proxies=kwargs.pop('''proxies''', lowerCAmelCase ),
                resume_download=kwargs.pop('''resume_download''', lowerCAmelCase ),
                local_files_only=kwargs.pop('''local_files_only''', lowerCAmelCase ),
                use_auth_token=kwargs.pop('''use_auth_token''', lowerCAmelCase ),
                revision=kwargs.pop('''revision''', lowerCAmelCase ), )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/' ), voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' )
            lowerCamelCase_ =np.load(lowerCAmelCase )
        return voice_preset_dict

    def lowercase__ ( self, lowerCAmelCase = None ):
        """simple docstring"""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key], np.ndarray ):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )

    def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase="pt", lowerCAmelCase=256, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=False, **lowerCAmelCase, ):
        """simple docstring"""
        if voice_preset is not None and not isinstance(lowerCAmelCase, lowerCAmelCase ):
            if (
                isinstance(lowerCAmelCase, lowerCAmelCase )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                lowerCamelCase_ =self._load_voice_preset(lowerCAmelCase )
            else:
                if isinstance(lowerCAmelCase, lowerCAmelCase ) and not voice_preset.endswith('''.npz''' ):
                    lowerCamelCase_ =voice_preset + '''.npz'''
                lowerCamelCase_ =np.load(lowerCAmelCase )
        if voice_preset is not None:
            self._validate_voice_preset_dict(lowerCAmelCase, **lowerCAmelCase )
            lowerCamelCase_ =BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
        lowerCamelCase_ =self.tokenizer(
            lowerCAmelCase,
            return_tensors=lowerCAmelCase,
            padding='''max_length''',
            max_length=lowerCAmelCase,
            return_attention_mask=lowerCAmelCase,
            return_token_type_ids=lowerCAmelCase,
            add_special_tokens=lowerCAmelCase,
            **lowerCAmelCase, )
        if voice_preset is not None:
            lowerCamelCase_ =voice_preset
        return encoded_text
6
'''simple docstring''' from collections import defaultdict from math import gcd def a_ ( __snake_case : int = 150_0000 ) -> int: """simple docstring""" lowerCamelCase_ =defaultdict(__snake_case ) lowerCamelCase_ =2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ): if gcd(__snake_case , __snake_case ) > 1: continue lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(__snake_case , limit + 1 , __snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"""{solution() = }""")
6
1
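The solution above counts perimeters via Euclid's parametrization (a = m^2 - n^2, b = 2mn, c = m^2 + n^2, scaled by k, so each perimeter is k * 2m(m + n)). A brute-force cross-check at a much smaller limit is useful for validating the counting logic; this is a verification sketch, not part of the solution file, and `perimeter_counts` is an illustrative name.

from collections import defaultdict
from math import isqrt


def perimeter_counts(limit):
    counts = defaultdict(int)
    for a in range(1, limit):
        for b in range(a, limit - a):  # b >= a avoids counting each triangle twice
            c_sq = a * a + b * b
            c = isqrt(c_sq)
            if c * c == c_sq and a + b + c <= limit:
                counts[a + b + c] += 1
    return counts


counts = perimeter_counts(120)
assert counts[12] == 1  # only (3, 4, 5)
assert counts[60] == 2  # (10, 24, 26) and (15, 20, 25)
print(sum(1 for frequency in counts.values() if frequency == 1))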
'''simple docstring''' import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =IFImgaImgSuperResolutionPipeline lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'} lowercase : Tuple =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} ) lowercase : List[Any] =PipelineTesterMixin.required_optional_params - {'latents'} def lowercase__ ( self ): """simple docstring""" return self._get_superresolution_dummy_components() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowerCamelCase_ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) lowerCamelCase_ ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', ) def lowercase__ ( self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def lowercase__ ( self ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' ) def lowercase__ ( self ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def lowercase__ ( self ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def lowercase__ ( self ): """simple docstring""" self._test_save_load_local() def lowercase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2, )
6
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a_ : Tuple = 16 a_ : Optional[int] = 32 def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str: """simple docstring""" lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__snake_case : int ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase_ =datasets.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase_ =16 elif accelerator.mixed_precision != "no": lowerCamelCase_ =8 else: lowerCamelCase_ =None return tokenizer.pad( __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCamelCase_ =DataLoader( tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) lowerCamelCase_ =DataLoader( tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a_ : Tuple = mocked_dataloaders # noqa: F811 def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1": lowerCamelCase_ =2 # Initialize accelerator lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase_ =config['''lr'''] lowerCamelCase_ =int(config['''num_epochs'''] ) lowerCamelCase_ =int(config['''seed'''] ) lowerCamelCase_ =int(config['''batch_size'''] ) lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=__snake_case ) def inner_training_loop(__snake_case : Union[str, Any] ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(__snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase_ =model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case ) lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case ) # Instantiate scheduler lowerCamelCase_ =get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
                batch.to(accelerator.device )
                lowerCamelCase_ =model(**__snake_case )
                lowerCamelCase_ =outputs.loss
                accelerator.backward(__snake_case )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(__snake_case ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    lowerCamelCase_ =model(**__snake_case )
                lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
                lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=__snake_case ,
                    references=__snake_case , )

            lowerCamelCase_ =metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''' , __snake_case )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def a_ ( ) -> Dict:
    """simple docstring"""
    lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' ,
        type=__snake_case ,
        default=__snake_case ,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] ,
        help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    lowerCamelCase_ =parser.parse_args()
    lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(__snake_case , __snake_case )


if __name__ == "__main__":
    main()
6
1
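The script above wraps its entire training loop in `find_executable_batch_size` so that a CUDA out-of-memory error restarts the loop with a smaller batch size instead of killing the run. A minimal sketch of that pattern follows, with the training body reduced to a stand-in; only the `Accelerator` and `find_executable_batch_size` imports come from the script above, everything else is illustrative.

from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size


def train(starting_batch_size=128):
    accelerator = Accelerator()

    @find_executable_batch_size(starting_batch_size=starting_batch_size)
    def inner_training_loop(batch_size):
        # Drop references held over from a previous, failed attempt.
        accelerator.free_memory()
        # ... build dataloaders, model, and optimizer for `batch_size`, then train ...
        accelerator.print(f"training with batch_size={batch_size}")

    inner_training_loop()  # called with no arguments; the decorator supplies batch_size


if __name__ == "__main__":
    train()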