Dataset schema:
code: string (lengths 82 to 54.1k)
code_codestyle: int64 (0 to 699)
style_context: string (lengths 111 to 35.6k)
style_context_codestyle: int64 (0 to 699)
label: int64 (0 to 1)
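Each record below pairs a `code` snippet with a `style_context` snippet, plus the two integer codestyle ids and a binary `label`. As a minimal sketch, assuming the dataset is hosted on the Hugging Face Hub (the repository id below is a placeholder, not the real one), it could be loaded with the `datasets` library:

from datasets import load_dataset

# Placeholder repo id -- substitute the actual dataset identifier.
ds = load_dataset("your-org/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]))                # source string, 82 to 54.1k chars
print(row["code_codestyle"])           # int64 style id in [0, 699]
print(len(row["style_context"]))       # context string, 111 to 35.6k chars
print(row["style_context_codestyle"])  # int64 style id in [0, 699]
print(row["label"])                    # int64, 0 or 1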
code:
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

lowerCAmelCase__ : Optional[Any] = '3'

print('Python version:', sys.version)
print('transformers version:', transformers.__version__)

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)

try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)

try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
code_codestyle: 101
style_context:
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

_snake_case : Optional[int] = logging.get_logger(__name__)

_snake_case : Any = {
    'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
    'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
    'junnyu/roformer_chinese_char_small': (
        'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
    ),
    'junnyu/roformer_chinese_char_base': (
        'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
    ),
    'junnyu/roformer_small_discriminator': (
        'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
    ),
    'junnyu/roformer_small_generator': (
        'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class A ( _a ):
    lowercase_ = 'roformer'

    def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str:
        """simple docstring"""
        super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
        _a = vocab_size
        _a = hidden_size if embedding_size is None else embedding_size
        _a = hidden_size
        _a = num_hidden_layers
        _a = num_attention_heads
        _a = hidden_act
        _a = intermediate_size
        _a = hidden_dropout_prob
        _a = attention_probs_dropout_prob
        _a = max_position_embeddings
        _a = type_vocab_size
        _a = initializer_range
        _a = layer_norm_eps
        _a = rotary_value
        _a = use_cache


class A ( _a ):
    @property
    def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            _a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            _a = {0: '''batch''', 1: '''sequence'''}
        _a = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ]
        )
style_context_codestyle: 22
label: 0

"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ : Union[str, Any] = logging.get_logger(__name__) __magic_name__ : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : Any = """megatron-bert""" def __init__( self , _A=2_9_0_5_6 , _A=1_0_2_4 , _A=2_4 , _A=1_6 , _A=4_0_9_6 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=2 , _A=0.02 , _A=1e-1_2 , _A=0 , _A="absolute" , _A=True , **_A , ): '''simple docstring''' super().__init__(pad_token_id=_A , **_A ) UpperCamelCase : int = vocab_size UpperCamelCase : int = hidden_size UpperCamelCase : Tuple = num_hidden_layers UpperCamelCase : Tuple = num_attention_heads UpperCamelCase : Any = hidden_act UpperCamelCase : Dict = intermediate_size UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : List[Any] = attention_probs_dropout_prob UpperCamelCase : Union[str, Any] = max_position_embeddings UpperCamelCase : int = type_vocab_size UpperCamelCase : List[Any] = initializer_range UpperCamelCase : Union[str, Any] = layer_norm_eps UpperCamelCase : Optional[int] = position_embedding_type UpperCamelCase : int = use_cache
code_codestyle: 102
style_context:
'''simple docstring'''
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class A :
    lowercase_ = 42
    lowercase_ = 42


class A :
    def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> str:
        """simple docstring"""
        _a = [[] for _ in range(lowerCAmelCase_ )]
        _a = size

    def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Iterator[Edge]:
        """simple docstring"""
        return iter(self._graph[vertex] )

    @property
    def __lowerCAmelCase ( self : str ) -> Tuple:
        """simple docstring"""
        return self._size

    def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict:
        """simple docstring"""
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) )

    def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> int | None:
        """simple docstring"""
        _a = deque([start_vertex] )
        _a = [None] * self.size
        _a = 0
        while queue:
            _a = queue.popleft()
            _a = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                _a = current_distance + edge.weight
                _a = distances[edge.destination_vertex]
                if (
                    isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                _a = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 22
label: 0

"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
code_codestyle: 103
style_context:
'''simple docstring'''
from math import pi, sqrt


def snake_case_ (UpperCamelCase : float ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError('''math domain error''' )
    if num > 171.5:
        raise OverflowError('''math range error''' )
    elif num - int(UpperCamelCase ) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''' )
    elif num == 0.5:
        return sqrt(UpperCamelCase )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )


def snake_case_ ():
    '''simple docstring'''
    assert gamma(0.5 ) == sqrt(UpperCamelCase )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    _snake_case : Optional[Any] = 1.0
    while num:
        _snake_case : Dict = float(input('Gamma of: '))
        print(F'''gamma({num}) = {gamma(num)}''')
        print('\nEnter 0 to exit...')
style_context_codestyle: 22
label: 0

"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def _lowerCamelCase ( UpperCAmelCase_ : int ) -> Optional[Any]: """simple docstring""" def is_in_circle(UpperCAmelCase_ : float, UpperCAmelCase_ : float ) -> bool: A__ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle A__ = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(UpperCAmelCase_ ) ) # The ratio of the area for circle to square is pi/4. A__ = proportion * 4 print(F"""The estimated value of pi is {pi_estimate}""" ) print(F"""The numpy value of pi is {pi}""" ) print(F"""The total error is {abs(pi - pi_estimate )}""" ) def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : Callable[[float], float], UpperCAmelCase_ : float = 0.0, UpperCAmelCase_ : float = 1.0, ) -> float: """simple docstring""" return mean( function_to_integrate(uniform(UpperCAmelCase_, UpperCAmelCase_ ) ) for _ in range(UpperCAmelCase_ ) ) * (max_value - min_value) def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : float = 0.0, UpperCAmelCase_ : float = 1.0 ) -> None: """simple docstring""" def identity_function(UpperCAmelCase_ : float ) -> float: return x A__ = area_under_curve_estimator( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) A__ = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(F"""Estimated value is {estimated_value}""" ) print(F"""Expected value is {expected_value}""" ) print(F"""Total error is {abs(estimated_value - expected_value )}""" ) print("******************" ) def _lowerCamelCase ( UpperCAmelCase_ : int ) -> None: """simple docstring""" def function_to_integrate(UpperCAmelCase_ : float ) -> float: return sqrt(4.0 - x * x ) A__ = area_under_curve_estimator( UpperCAmelCase_, UpperCAmelCase_, 0.0, 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(F"""Estimated value is {estimated_value}""" ) print(F"""Expected value is {pi}""" ) print(F"""Total error is {abs(estimated_value - pi )}""" ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 104
style_context:
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

enable_full_determinism()


@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    def __lowerCAmelCase ( self : int ) -> Any:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCAmelCase ( self : List[Any] ) -> int:
        """simple docstring"""
        _a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        _a = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        sd_pipe.set_scheduler('''sample_euler''' )
        _a = '''A painting of a squirrel eating a burger'''
        _a = torch.manual_seed(0 )
        _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        _a = output.images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        _a = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        sd_pipe.set_scheduler('''sample_euler''' )
        _a = '''A painting of a squirrel eating a burger'''
        _a = torch.manual_seed(0 )
        _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        _a = output.images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1

    def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        _a = sd_pipe.to(lowerCAmelCase_ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        _a = '''A painting of a squirrel eating a burger'''
        _a = torch.manual_seed(0 )
        _a = sd_pipe(
            [prompt] ,
            generator=lowerCAmelCase_ ,
            guidance_scale=7.5 ,
            num_inference_steps=15 ,
            output_type='''np''' ,
            use_karras_sigmas=lowerCAmelCase_ ,
        )
        _a = output.images
        _a = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        _a = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
style_context_codestyle: 22
label: 0

code:
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem

UpperCamelCase__ : List[Any] = importlib.util.find_spec('''s3fs''') is not None

if _has_safs:
    from .safilesystem import SaFileSystem  # noqa: F401

UpperCamelCase__ : List[compression.BaseCompressedFileFileSystem] = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def __UpperCAmelCase ( lowerCamelCase_ : str ) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        SCREAMING_SNAKE_CASE_ : Any = dataset_path.split('://' )[1]
    return dataset_path


def __UpperCAmelCase ( lowerCamelCase_ : fsspec.AbstractFileSystem ) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def __UpperCAmelCase ( lowerCamelCase_ : fsspec.AbstractFileSystem , lowerCamelCase_ : str , lowerCamelCase_ : str ) -> str:
    """simple docstring"""
    SCREAMING_SNAKE_CASE_ : Optional[int] = not is_remote_filesystem(lowerCamelCase_ )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(lowerCamelCase_ ) , fs._strip_protocol(lowerCamelCase_ ) )
    else:
        fs.mv(lowerCamelCase_ , lowerCamelCase_ , recursive=lowerCamelCase_ )


def __UpperCAmelCase ( ) -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        SCREAMING_SNAKE_CASE_ : Any = None
        SCREAMING_SNAKE_CASE_ : Dict = None
        SCREAMING_SNAKE_CASE_ : List[str] = threading.Lock()
code_codestyle: 105
style_context:
'''simple docstring'''
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets

_snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'

_snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'

_snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'


def snake_case_ (UpperCamelCase : Tuple ):
    '''simple docstring'''

    def remove_articles(UpperCamelCase : Optional[int] ):
        _a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase )

    def white_space_fix(UpperCamelCase : Union[str, Any] ):
        return " ".join(text.split() )

    def remove_punc(UpperCamelCase : str ):
        _a = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(UpperCamelCase : Tuple ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) )


def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ):
    '''simple docstring'''
    return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) )


def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ):
    '''simple docstring'''
    _a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )]
    return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100


def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ):
    '''simple docstring'''
    _a = [rgram for rgrams in rgramslist for rgram in rgrams]
    _a = Counter(UpperCamelCase )
    _a = Counter(UpperCamelCase )
    _a = Counter()
    for sgram, scount in sgramcounter.items():
        _a = scount * numref
    _a = Counter(UpperCamelCase )
    _a = Counter()
    for cgram, ccount in cgramcounter.items():
        _a = ccount * numref

    # KEEP
    _a = sgramcounter_rep & cgramcounter_rep
    _a = keepgramcounter_rep & rgramcounter
    _a = sgramcounter_rep & rgramcounter
    _a = 0
    _a = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscorea += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    _a = 1
    _a = 1
    if len(UpperCamelCase ) > 0:
        _a = keeptmpscorea / len(UpperCamelCase )
    if len(UpperCamelCase ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        _a = keeptmpscorea / sum(keepgramcounterall_rep.values() )
    _a = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    _a = sgramcounter_rep - cgramcounter_rep
    _a = delgramcounter_rep - rgramcounter
    _a = sgramcounter_rep - rgramcounter
    _a = 0
    _a = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    _a = 1
    if len(UpperCamelCase ) > 0:
        _a = deltmpscorea / len(UpperCamelCase )

    # ADDITION
    _a = set(UpperCamelCase ) - set(UpperCamelCase )
    _a = set(UpperCamelCase ) & set(UpperCamelCase )
    _a = set(UpperCamelCase ) - set(UpperCamelCase )
    _a = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    _a = 1
    _a = 1
    if len(UpperCamelCase ) > 0:
        _a = addtmpscore / len(UpperCamelCase )
    if len(UpperCamelCase ) > 0:
        _a = addtmpscore / len(UpperCamelCase )
    _a = 0
    if addscore_precision > 0 or addscore_recall > 0:
        _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)


def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ):
    '''simple docstring'''
    _a = len(UpperCamelCase )
    _a = ssent.split(''' ''' )
    _a = csent.split(''' ''' )
    _a = []
    _a = []
    _a = []
    _a = []
    _a = []
    _a = []
    _a = []
    _a = []
    _a = []
    _a = []
    for rsent in rsents:
        _a = rsent.split(''' ''' )
        _a = []
        _a = []
        _a = []
        ragramslist.append(UpperCamelCase )
        for i in range(0 , len(UpperCamelCase ) - 1 ):
            if i < len(UpperCamelCase ) - 1:
                _a = ragrams[i] + ''' ''' + ragrams[i + 1]
                ragrams.append(UpperCamelCase )
            if i < len(UpperCamelCase ) - 2:
                _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
                ragrams.append(UpperCamelCase )
            if i < len(UpperCamelCase ) - 3:
                _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
                ragrams.append(UpperCamelCase )
        ragramslist.append(UpperCamelCase )
        ragramslist.append(UpperCamelCase )
        ragramslist.append(UpperCamelCase )
    for i in range(0 , len(UpperCamelCase ) - 1 ):
        if i < len(UpperCamelCase ) - 1:
            _a = sagrams[i] + ''' ''' + sagrams[i + 1]
            sagrams.append(UpperCamelCase )
        if i < len(UpperCamelCase ) - 2:
            _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
            sagrams.append(UpperCamelCase )
        if i < len(UpperCamelCase ) - 3:
            _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
            sagrams.append(UpperCamelCase )
    for i in range(0 , len(UpperCamelCase ) - 1 ):
        if i < len(UpperCamelCase ) - 1:
            _a = cagrams[i] + ''' ''' + cagrams[i + 1]
            cagrams.append(UpperCamelCase )
        if i < len(UpperCamelCase ) - 2:
            _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
            cagrams.append(UpperCamelCase )
        if i < len(UpperCamelCase ) - 3:
            _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
            cagrams.append(UpperCamelCase )
    ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
    _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
    _a = sum([delascore, delascore, delascore, delascore] ) / 4
    _a = sum([addascore, addascore, addascore, addascore] ) / 4
    _a = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ):
    '''simple docstring'''
    if lowercase:
        _a = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            _a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase )
        else:
            _a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase )
    elif tokenizer == "moses":
        _a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase )
    elif tokenizer == "penn":
        _a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase )
    else:
        _a = sentence
    if not return_str:
        _a = normalized_sent.split()
    return normalized_sent


def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ):
    '''simple docstring'''
    if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    _a = 0
    for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
        sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] )
    _a = sari_score / len(UpperCamelCase )
    return 100 * sari_score


def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ):
    '''simple docstring'''
    _a = len(references[0] )
    if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    _a = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
    _a = sacrebleu.corpus_bleu(
        UpperCamelCase ,
        UpperCamelCase ,
        smooth_method=UpperCamelCase ,
        smooth_value=UpperCamelCase ,
        force=UpperCamelCase ,
        lowercase=UpperCamelCase ,
        use_effective_order=UpperCamelCase ,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def __lowerCAmelCase ( self : Tuple ) -> Dict:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
                } ) ,
            codebase_urls=[
                '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
                '''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
                '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
                '''https://github.com/mjpost/sacreBLEU''',
            ] ,
            reference_urls=[
                '''https://www.aclweb.org/anthology/Q16-1029.pdf''',
                '''https://github.com/mjpost/sacreBLEU''',
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ] ,
        )

    def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict:
        """simple docstring"""
        _a = {}
        result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
        result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} )
        return result
style_context_codestyle: 22
label: 0

code:
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device

__snake_case :str = False


class lowerCAmelCase__ ( unittest.TestCase ):
    pass


@nightly
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        A = torch.manual_seed(0 )
        A = pipe.dual_guided(
            prompt='first prompt' ,
            image=__UpperCamelCase ,
            text_to_image_strength=0.7_5 ,
            generator=__UpperCamelCase ,
            guidance_scale=7.5 ,
            num_inference_steps=2 ,
            output_type='numpy' ,
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__UpperCamelCase )
            A = VersatileDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=torch.floataa )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = generator.manual_seed(0 )
        A = pipe.dual_guided(
            prompt='first prompt' ,
            image=__UpperCamelCase ,
            text_to_image_strength=0.7_5 ,
            generator=__UpperCamelCase ,
            guidance_scale=7.5 ,
            num_inference_steps=2 ,
            output_type='numpy' ,
        ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def __UpperCamelCase ( self : Tuple ) -> List[str]:
        A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = 'cyberpunk 2077'
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        A = torch.manual_seed(0 )
        A = pipe.dual_guided(
            prompt=__UpperCamelCase ,
            image=__UpperCamelCase ,
            text_to_image_strength=0.7_5 ,
            generator=__UpperCamelCase ,
            guidance_scale=7.5 ,
            num_inference_steps=50 ,
            output_type='numpy' ,
        ).images
        A = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        A = 'A painting of a squirrel eating a burger '
        A = torch.manual_seed(0 )
        A = pipe.text_to_image(
            prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        A = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        A = pipe.image_variation(__UpperCamelCase , generator=__UpperCamelCase , output_type='numpy' ).images
        A = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
code_codestyle: 106
style_context:
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    _snake_case : Tuple = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    _snake_case : Any = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def snake_case_ (UpperCamelCase : Optional[int] ):
    '''simple docstring'''
    _a = (images / 2 + 0.5).clamp(0 , 1 )
    _a = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    _a = numpy_to_pil(UpperCamelCase )
    return images


def snake_case_ (UpperCamelCase : str ):
    '''simple docstring'''
    if images.ndim == 3:
        _a = images[None, ...]
    _a = (images * 255).round().astype('''uint8''' )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        _a = [Image.fromarray(image.squeeze() , mode='''L''' ) for image in images]
    else:
        _a = [Image.fromarray(UpperCamelCase ) for image in images]
    return pil_images
style_context_codestyle: 22
label: 0

code:
'''simple docstring'''
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

_UpperCAmelCase : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

_UpperCAmelCase : str = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F'''{len(upper_files)} files contain uppercase characters:''')
    print('''\n'''.join(upper_files) + '''\n''')

_UpperCAmelCase : str = [file for file in filepaths if ''' ''' in file]
if space_files:
    print(F'''{len(space_files)} files contain space characters:''')
    print('''\n'''.join(space_files) + '''\n''')

_UpperCAmelCase : List[str] = [file for file in filepaths if '''-''' in file]
if hyphen_files:
    print(F'''{len(hyphen_files)} files contain hyphen characters:''')
    print('''\n'''.join(hyphen_files) + '''\n''')

_UpperCAmelCase : List[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F'''{len(nodir_files)} files are not in a directory:''')
    print('''\n'''.join(nodir_files) + '''\n''')

_UpperCAmelCase : Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
code_codestyle: 107
style_context:
'''simple docstring'''
import requests


def snake_case_ (UpperCamelCase : str , UpperCamelCase : str ):
    '''simple docstring'''
    _a = {'''Content-Type''': '''application/json'''}
    _a = requests.post(UpperCamelCase , json={'''text''': message_body} , headers=UpperCamelCase )
    if response.status_code != 200:
        _a = (
            '''Request to slack returned an error '''
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(UpperCamelCase )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
style_context_codestyle: 22
label: 0

code:
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> bool:
    _UpperCAmelCase = [int(__snake_case ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
    return len(__snake_case ) == 4 and all(0 <= int(__snake_case ) <= 2_5_4 for octet in octets )


if __name__ == "__main__":
    __a: int = input().strip()
    __a: Any = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
    print(F"{ip} is a {valid_or_invalid} IP v4 address.")
code_codestyle: 108
style_context:
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging

if is_torch_available():
    import torch

_snake_case : Tuple = logging.get_logger(__name__)


class A ( _a ):
    lowercase_ = ['pixel_values']

    def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None:
        """simple docstring"""
        super().__init__(**lowerCAmelCase_ )
        _a = size if size is not None else {'''shortest_edge''': 2_56}
        _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
        _a = do_resize
        _a = size
        _a = resample
        _a = do_center_crop
        _a = crop_size
        _a = do_rescale
        _a = rescale_factor
        _a = do_normalize
        _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _a = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
        """simple docstring"""
        _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ )
        return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray:
        """simple docstring"""
        _a = get_size_dict(lowerCAmelCase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
        return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray:
        """simple docstring"""
        return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
        """simple docstring"""
        return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]:
        """simple docstring"""
        _a = do_resize if do_resize is not None else self.do_resize
        _a = size if size is not None else self.size
        _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        _a = resample if resample is not None else self.resample
        _a = do_center_crop if do_center_crop is not None else self.do_center_crop
        _a = crop_size if crop_size is not None else self.crop_size
        _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' )
        _a = do_rescale if do_rescale is not None else self.do_rescale
        _a = rescale_factor if rescale_factor is not None else self.rescale_factor
        _a = do_normalize if do_normalize is not None else self.do_normalize
        _a = image_mean if image_mean is not None else self.image_mean
        _a = image_std if image_std is not None else self.image_std
        _a = make_list_of_images(lowerCAmelCase_ )
        if not valid_images(lowerCAmelCase_ ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        _a = [to_numpy_array(lowerCAmelCase_ ) for image in images]
        if do_resize:
            _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images]
        if do_center_crop:
            _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
        if do_rescale:
            _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
        if do_normalize:
            _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images]
        _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
        _a = {'''pixel_values''': images}
        return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )

    def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any:
        """simple docstring"""
        _a = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(lowerCAmelCase_ ):
                _a = target_sizes.numpy()
            _a = []
            for idx in range(len(lowerCAmelCase_ ) ):
                _a = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ )
                _a = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(lowerCAmelCase_ )
        else:
            _a = logits.argmax(dim=1 )
            _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
style_context_codestyle: 22
label: 0

'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __a : def __init__( self : Dict ,lowerCamelCase : str ,lowerCamelCase : Dict=13 ,lowerCamelCase : Dict=7 ,lowerCamelCase : int=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : List[Any]=False ,lowerCamelCase : List[str]=True ,lowerCamelCase : List[str]=99 ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Any=5 ,lowerCamelCase : Any=4 ,lowerCamelCase : Union[str, Any]=37 ,lowerCamelCase : Optional[int]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[int]=512 ,lowerCamelCase : Dict=16 ,lowerCamelCase : Dict=2 ,lowerCamelCase : str=0.02 ,lowerCamelCase : Optional[int]=3 ,lowerCamelCase : str=4 ,lowerCamelCase : Tuple=None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob 
,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,) def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : str ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = LlamaModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : List[str] ,lowerCamelCase : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = LlamaModel(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = model( lowerCamelCase ,attention_mask=lowerCamelCase ,encoder_hidden_states=lowerCamelCase ,encoder_attention_mask=lowerCamelCase ,) __SCREAMING_SNAKE_CASE = model( lowerCamelCase ,attention_mask=lowerCamelCase ,encoder_hidden_states=lowerCamelCase ,) __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Dict ,lowerCamelCase : Any ,lowerCamelCase : Tuple ,lowerCamelCase : Any ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[str] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = LlamaForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : str ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[Any] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = LlamaForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() # first forward pass __SCREAMING_SNAKE_CASE = model( lowerCamelCase ,attention_mask=lowerCamelCase ,encoder_hidden_states=lowerCamelCase ,encoder_attention_mask=lowerCamelCase ,use_cache=lowerCamelCase ,) __SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) ,config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and __SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] ,dim=-1 ) __SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] ,dim=-1 ) __SCREAMING_SNAKE_CASE = model( lowerCamelCase ,attention_mask=lowerCamelCase ,encoder_hidden_states=lowerCamelCase 
,encoder_attention_mask=lowerCamelCase ,output_hidden_states=lowerCamelCase ,)["""hidden_states"""][0] __SCREAMING_SNAKE_CASE = model( lowerCamelCase ,attention_mask=lowerCamelCase ,encoder_hidden_states=lowerCamelCase ,encoder_attention_mask=lowerCamelCase ,past_key_values=lowerCamelCase ,output_hidden_states=lowerCamelCase ,)["""hidden_states"""][0] # select random slice __SCREAMING_SNAKE_CASE = ids_tensor((1,) ,output_from_past.shape[-1] ).item() __SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() __SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-3 ) ) def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __a ( _snake_case, _snake_case, _snake_case, unittest.TestCase ): __UpperCamelCase : Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __UpperCamelCase : int = (LlamaForCausalLM,) if is_torch_available() else () __UpperCamelCase : Union[str, Any] = ( { 'feature-extraction': LlamaModel, 'text-classification': LlamaForSequenceClassification, 'text-generation': LlamaForCausalLM, 'zero-shot': LlamaForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase : List[str] = False __UpperCamelCase : Tuple = False def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = LlamaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,hidden_size=37 ) def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = input_dict["""input_ids"""] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,labels=lowerCamelCase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 
self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = """single_label_classification""" __SCREAMING_SNAKE_CASE = input_dict["""input_ids"""] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,labels=lowerCamelCase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = """multi_label_classification""" __SCREAMING_SNAKE_CASE = input_dict["""input_ids"""] __SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) __SCREAMING_SNAKE_CASE = LlamaForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __SCREAMING_SNAKE_CASE = model(lowerCamelCase ,attention_mask=lowerCamelCase ,labels=lowerCamelCase ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() __SCREAMING_SNAKE_CASE = ids_tensor([1, 10] ,config.vocab_size ) __SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __SCREAMING_SNAKE_CASE = LlamaModel(lowerCamelCase ) original_model.to(lowerCamelCase ) original_model.eval() __SCREAMING_SNAKE_CASE = original_model(lowerCamelCase ).last_hidden_state __SCREAMING_SNAKE_CASE = original_model(lowerCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __SCREAMING_SNAKE_CASE = {"""type""": scaling_type, """factor""": 10.0} __SCREAMING_SNAKE_CASE = LlamaModel(lowerCamelCase ) scaled_model.to(lowerCamelCase ) scaled_model.eval() __SCREAMING_SNAKE_CASE = scaled_model(lowerCamelCase ).last_hidden_state __SCREAMING_SNAKE_CASE = scaled_model(lowerCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-5 ) ) else: self.assertFalse(torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-5 ) ) @require_torch class __a ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def UpperCAmelCase__ ( self : Optional[int] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [1, 306, 4658, 278, 6593, 310, 2834, 338] __SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" ) __SCREAMING_SNAKE_CASE = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __SCREAMING_SNAKE_CASE = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) ,lowerCamelCase ,atol=1E-2 ,rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __SCREAMING_SNAKE_CASE = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,lowerCamelCase ,atol=1E-5 ,rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [1, 306, 4658, 278, 6593, 310, 2834, 338] __SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" ) __SCREAMING_SNAKE_CASE = model(torch.tensor(lowerCamelCase ) ) # Expected mean on dim = -1 __SCREAMING_SNAKE_CASE = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) ,lowerCamelCase ,atol=1E-2 ,rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __SCREAMING_SNAKE_CASE = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,lowerCamelCase ,atol=1E-5 ,rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [1, 306, 4658, 278, 6593, 310, 2834, 338] __SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" ) __SCREAMING_SNAKE_CASE = model(torch.tensor(lowerCamelCase ) ) # Expected mean on dim = -1 __SCREAMING_SNAKE_CASE = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) ,lowerCamelCase ,atol=1E-2 ,rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __SCREAMING_SNAKE_CASE = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, 
-1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,lowerCamelCase ,atol=1E-2 ,rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [1, 306, 4658, 278, 6593, 310, 2834, 338] __SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" ) __SCREAMING_SNAKE_CASE = model(torch.tensor(lowerCamelCase ) ) __SCREAMING_SNAKE_CASE = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,lowerCamelCase ,atol=1E-2 ,rtol=1E-2 ) # fmt: off __SCREAMING_SNAKE_CASE = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,lowerCamelCase ,atol=1E-5 ,rtol=1E-5 ) @unittest.skip("""Model is currently gated""" ) @slow def UpperCAmelCase__ ( self : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __SCREAMING_SNAKE_CASE = """Simply put, the theory of relativity states that """ __SCREAMING_SNAKE_CASE = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCamelCase ,return_tensors="""pt""" ) __SCREAMING_SNAKE_CASE = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=lowerCamelCase ) # greedy generation outputs __SCREAMING_SNAKE_CASE = model.generate(lowerCamelCase ,max_new_tokens=64 ,top_p=lowerCamelCase ,temperature=1 ,do_sample=lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer.decode(generated_ids[0] ,skip_special_tokens=lowerCamelCase ) self.assertEqual(lowerCamelCase ,lowerCamelCase )
109
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ): '''simple docstring''' _a = {} if train_file is not None: _a = [train_file] if eval_file is not None: _a = [eval_file] if test_file is not None: _a = [test_file] _a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase ) _a = list(ds[list(files.keys() )[0]].features.keys() ) _a = features_name.pop(UpperCamelCase ) _a = list(set(ds[list(files.keys() )[0]][label_name] ) ) _a = {label: i for i, label in enumerate(UpperCamelCase )} _a = tokenizer.model_input_names _a = {} if len(UpperCamelCase ) == 1: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , ) elif len(UpperCamelCase ) == 2: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid _snake_case : str = logging.getLogger(__name__) @dataclass class A : lowercase_ = field(metadata={'help': 'Which column contains the label'} ) lowercase_ = field(default=_a 
,metadata={'help': 'The path of the training file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} ) lowercase_ = field( default=128 ,metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } ,) lowercase_ = field( default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A : lowercase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowercase_ = field( default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,) def snake_case_ (): '''simple docstring''' _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _a , _a , _a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _a , _a , _a , _a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict: _a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _a = TFTrainer( model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _a = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _a = trainer.evaluate() _a = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(UpperCamelCase ) return results if __name__ == "__main__": main()
22
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCamelCase__ = logging.get_logger(__name__) class a ( lowercase ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ): warnings.warn( 'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DeiTImageProcessor instead.' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
110
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( _a ,unittest.TestCase ): lowercase_ = LEDTokenizer lowercase_ = LEDTokenizerFast lowercase_ = True def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" super().setUp() _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def __lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , lowerCAmelCase_ ) self.assertIn('''attention_mask''' , lowerCAmelCase_ ) self.assertNotIn('''labels''' , lowerCAmelCase_ ) 
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _a = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = ['''A long paragraph for summarization.'''] _a = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' ) _a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' ) _a = inputs['''input_ids'''] _a = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = ['''Summary of the text.''', '''Another summary.'''] _a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']] _a = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = '''A, <mask> AllenNLP sentence.''' _a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
22
0
def __lowercase ( _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" if not numbers: return 0 if not isinstance(_UpperCamelCase, (list, tuple) ) or not all( isinstance(_UpperCamelCase, _UpperCamelCase ) for number in numbers ): raise ValueError('''numbers must be an iterable of integers''' ) lowercase : Tuple = numbers[0] for i in range(1, len(_UpperCamelCase ) ): # update the maximum and minimum subarray products lowercase : Tuple = numbers[i] if number < 0: lowercase , lowercase : int = min_till_now, max_till_now lowercase : Dict = max(_UpperCamelCase, max_till_now * number ) lowercase : str = min(_UpperCamelCase, min_till_now * number ) # update the maximum product found till now lowercase : str = max(_UpperCamelCase, _UpperCamelCase ) return max_prod
319
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def snake_case_ (UpperCamelCase : SplitDict ): '''simple docstring''' _a = split_dict._to_yaml_list() assert len(UpperCamelCase ) == len(UpperCamelCase ) _a = SplitDict._from_yaml_list(UpperCamelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump _a = None # the split name of split_dict takes over the name of the split info object _a = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] ) def snake_case_ (UpperCamelCase : List[str] ): '''simple docstring''' _a = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
22
0
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self : Tuple ) -> Dict: """simple docstring""" snake_case_ = inspect.getfile(accelerate.test_utils ) snake_case_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) snake_case_ = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) snake_case_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def lowerCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" print(F'''Found {torch.cuda.device_count()} devices.''' ) snake_case_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" print(F'''Found {torch.cuda.device_count()} devices.''' ) snake_case_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" snake_case_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) snake_case_ = ["torchrun", F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(lowerCAmelCase_ , env=os.environ.copy() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :str = Accelerator() SCREAMING_SNAKE_CASE :str = (accelerator.state.process_index + 2, 10) SCREAMING_SNAKE_CASE :Optional[Any] = torch.randint(0, 10, shape).to(accelerator.device) SCREAMING_SNAKE_CASE :Dict = '' SCREAMING_SNAKE_CASE :Optional[int] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." SCREAMING_SNAKE_CASE :str = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." SCREAMING_SNAKE_CASE :int = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." 
# Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
283
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black _snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" _a = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _a = self.diffusers_dir shutil.copy( os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]: """simple docstring""" _a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: _a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result _a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) _a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ ) _a = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f: f.write(lowerCAmelCase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ ) with open(lowerCAmelCase_ , '''r''' ) as f: self.assertTrue(f.read() , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , ) # Copy consistency with a 
really long name _a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
22
0
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCamelCase : Optional[Any] = logging.get_logger(__name__) _lowerCamelCase : int = { 'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json', } class lowercase ( _a ): lowercase__ : Union[str, Any] = """efficientnet""" def __init__( self : str , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 600 , _UpperCamelCase : float = 2.0 , _UpperCamelCase : float = 3.1 , _UpperCamelCase : int = 8 , _UpperCamelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , _UpperCamelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , _UpperCamelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , _UpperCamelCase : List[int] = [] , _UpperCamelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , _UpperCamelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , _UpperCamelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , _UpperCamelCase : float = 0.2_5 , _UpperCamelCase : str = "swish" , _UpperCamelCase : int = 2_560 , _UpperCamelCase : str = "mean" , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : float = 0.0_0_1 , _UpperCamelCase : float = 0.9_9 , _UpperCamelCase : float = 0.5 , _UpperCamelCase : float = 0.2 , **_UpperCamelCase : Optional[int] , ) -> Optional[int]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = width_coefficient SCREAMING_SNAKE_CASE = depth_coefficient SCREAMING_SNAKE_CASE = depth_divisor SCREAMING_SNAKE_CASE = kernel_sizes SCREAMING_SNAKE_CASE = in_channels SCREAMING_SNAKE_CASE = out_channels SCREAMING_SNAKE_CASE = depthwise_padding SCREAMING_SNAKE_CASE = strides SCREAMING_SNAKE_CASE = num_block_repeats SCREAMING_SNAKE_CASE = expand_ratios SCREAMING_SNAKE_CASE = squeeze_expansion_ratio SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dim SCREAMING_SNAKE_CASE = pooling_type SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = batch_norm_eps SCREAMING_SNAKE_CASE = batch_norm_momentum SCREAMING_SNAKE_CASE = dropout_rate SCREAMING_SNAKE_CASE = drop_connect_rate SCREAMING_SNAKE_CASE = sum(lowerCAmelCase_ ) * 4 class lowercase ( _a ): lowercase__ : Union[str, Any] = version.parse("""1.11""" ) @property def __snake_case( self : Any ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def __snake_case( self : Dict ) -> float: '''simple docstring''' return 1e-5
403
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _snake_case : Tuple = logging.get_logger(__name__) _snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case : List[Any] = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } _snake_case : Union[str, Any] = { 'squeezebert/squeezebert-uncased': 512, 'squeezebert/squeezebert-mnli': 512, 'squeezebert/squeezebert-mnli-headless': 512, } _snake_case : Tuple = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class A ( _a ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = SqueezeBertTokenizer def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int: """simple docstring""" super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , ) _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars ): _a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) ) _a = do_lower_case _a = strip_accents _a = tokenize_chinese_chars _a = normalizer_class(**lowerCAmelCase_ ) _a = do_lower_case def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]: """simple docstring""" _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = 
[self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" _a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
22
0
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) -> Optional[int]: UpperCamelCase_: Optional[int] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('All input parameters must be non-negative' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('Relative densities cannot be greater than one' ) else: UpperCamelCase_: Dict = 1 - (matter_density + radiation_density + dark_energy) UpperCamelCase_: Union[str, Any] = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) UpperCamelCase_: List[Any] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation A_ : List[str] = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1E-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
57
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _snake_case : Dict = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = do_rescale _a = do_normalize _a = do_center_crop _a = crop_size _a = size _a = resample _a = rescale_factor _a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "shortest_edge" in size: _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _a = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = do_rescale if do_rescale is not None else self.do_rescale _a = do_normalize if do_normalize is not None else self.do_normalize _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ ) if not is_batched(lowerCAmelCase_ ): _a = [images] if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
22
0
"""simple docstring""" from collections.abc import Sequence def __A (_SCREAMING_SNAKE_CASE = None ) ->Optional[Any]: """simple docstring""" if nums is None or not nums: raise ValueError('Input sequence should not be empty' ) lowerCAmelCase__ :Any = nums[0] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): lowerCAmelCase__ :str = nums[i] lowerCAmelCase__ :Any = max(_SCREAMING_SNAKE_CASE , ans + num , _SCREAMING_SNAKE_CASE ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __A = int(input("""Enter number of elements : """).strip()) __A = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n] print(max_subsequence_sum(array))
93
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case : str = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[str] = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Optional[int] = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Tuple = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[Any] = ['LayoutLMv3FeatureExtractor'] _snake_case : Tuple = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys _snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
22
0
'''simple docstring''' def lowerCamelCase_ ( A_ ): __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = { '''^''': 3, '''*''': 2, '''/''': 2, '''%''': 2, '''+''': 1, '''-''': 1, } # Priority of each operator __lowerCamelCase = len(A_ ) if (len(A_ ) > 7) else 7 # Print table header for output print( '''Symbol'''.center(8 ) , '''Stack'''.center(A_ ) , '''Postfix'''.center(A_ ) , sep=''' | ''' , ) print('''-''' * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(A_ ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(A_ ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(A_ ) == 0: stack.append(A_ ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(A_ ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(A_ ) # push x to stack print( x.center(8 ) , (''''''.join(A_ )).ljust(A_ ) , (''''''.join(A_ )).ljust(A_ ) , sep=''' | ''' , ) # Output in tabular format while len(A_ ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( ''' '''.center(8 ) , (''''''.join(A_ )).ljust(A_ ) , (''''''.join(A_ )).ljust(A_ ) , sep=''' | ''' , ) # Output in tabular format return "".join(A_ ) # return Postfix as str def lowerCamelCase_ ( A_ ): __lowerCamelCase = list(infix[::-1] ) # reverse the infix equation for i in range(len(A_ ) ): if infix[i] == "(": __lowerCamelCase = ''')''' # change "(" to ")" elif infix[i] == ")": __lowerCamelCase = '''(''' # change ")" to "(" return (infix_2_postfix(''''''.join(A_ ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": _UpperCamelCase : int =input("\nEnter an Infix Equation = ") # Input an Infix equation _UpperCamelCase : List[str] =''.join(Infix.split()) # Remove spaces from the input print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
316
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A ( _a ): lowercase_ = (DDPMParallelScheduler,) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]: """simple docstring""" _a = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowerCAmelCase_ ) return config def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , ) def __lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5 def __lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = self.dummy_sample_deter + 0.1 _a = self.dummy_sample_deter - 0.1 _a = samplea.shape[0] _a = torch.stack([samplea, samplea, samplea] , dim=0 ) _a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ ) _a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _a = 
self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config(prediction_type='''v_prediction''' ) _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def __lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) _a = scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase_ ): if i == len(lowerCAmelCase_ ) - 1: _a = -1 else: _a = timesteps[i + 1] _a = scheduler.previous_timestep(lowerCAmelCase_ ) _a = prev_t.item() self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] _a = len(lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
22
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> Optional[int]: '''simple docstring''' A = 384 if "tiny" in model_name: A = [3, 3, 9, 3] A = [96, 192, 384, 768] if "small" in model_name: A = [3, 3, 27, 3] A = [96, 192, 384, 768] if "base" in model_name: A = [3, 3, 27, 3] A = [128, 256, 512, 1024] A = 512 if "large" in model_name: A = [3, 3, 27, 3] A = [192, 384, 768, 1536] A = 768 if "xlarge" in model_name: A = [3, 3, 27, 3] A = [256, 512, 1024, 2048] A = 1024 # set label information A = 150 A = 'huggingface/label-files' A = 'ade20k-id2label.json' A = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='dataset' ) , 'r' ) ) A = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} A = {v: k for k, v in idalabel.items()} A = ConvNextConfig( depths=lowerCAmelCase__ , hidden_sizes=lowerCAmelCase__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) A = UperNetConfig( backbone_config=lowerCAmelCase__ , auxiliary_in_channels=lowerCAmelCase__ , num_labels=lowerCAmelCase__ , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , ) return config def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> List[Any]: '''simple docstring''' A = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') ) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') ) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') ) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', 
F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def lowerCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ) -> str: '''simple docstring''' A = dct.pop(lowerCAmelCase__ ) A = val def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] ) -> Optional[int]: '''simple docstring''' A = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } A = model_name_to_url[model_name] A = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location='cpu' )['state_dict'] A = get_upernet_config(lowerCAmelCase__ ) A = UperNetForSemanticSegmentation(lowerCAmelCase__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A = state_dict.pop(lowerCAmelCase__ ) if "bn" in key: A = key.replace('bn' , 'batch_norm' ) A = val # rename keys A = create_rename_keys(lowerCAmelCase__ ) for src, dest in rename_keys: rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) model.load_state_dict(lowerCAmelCase__ ) # verify on image A = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' A = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert('RGB' ) A = SegformerImageProcessor() A = processor(lowerCAmelCase__ , return_tensors='pt' ).pixel_values with torch.no_grad(): A = model(lowerCAmelCase__ ) if model_name == "upernet-convnext-tiny": A = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": A = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": A = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, 
-8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": A = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": A = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCAmelCase__ ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(lowerCAmelCase__ ) if push_to_hub: print(F'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(F'''openmmlab/{model_name}''' ) processor.push_to_hub(F'''openmmlab/{model_name}''' ) if __name__ == "__main__": __snake_case :str =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='upernet-convnext-tiny', type=str, choices=[F'''upernet-convnext-{size}''' for size in ['tiny', 'small', 'base', 'large', 'xlarge']], help='Name of the ConvNext UperNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __snake_case :Dict =parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
106
'''simple docstring''' import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def snake_case_ (UpperCamelCase : dict ): '''simple docstring''' return (data["data"], data["target"]) def snake_case_ (UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ): '''simple docstring''' _a = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(UpperCamelCase , UpperCamelCase ) # Predict target for test data _a = xgb.predict(UpperCamelCase ) _a = predictions.reshape(len(UpperCamelCase ) , 1 ) return predictions def snake_case_ (): '''simple docstring''' _a = fetch_california_housing() _a , _a = data_handling(UpperCamelCase ) _a , _a , _a , _a = train_test_split( UpperCamelCase , UpperCamelCase , test_size=0.25 , random_state=1 ) _a = xgboost(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Error printing print(f'Mean Absolute Error : {mean_absolute_error(UpperCamelCase , UpperCamelCase )}' ) print(f'Mean Square Error : {mean_squared_error(UpperCamelCase , UpperCamelCase )}' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
22
0
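The conversion script above follows a common checkpoint-porting pattern: build a table of (old_key, new_key) pairs, then move each tensor under its new name with `pop`. A tiny sketch on a hypothetical two-entry state dict (the real script derives the rename table programmatically from the model config):

from collections import OrderedDict

# Hypothetical miniature state dict standing in for a real checkpoint.
state_dict = OrderedDict({"backbone.norm0.weight": 1.0, "decode_head.conv_seg.bias": 2.0})
rename_keys = [
    ("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight"),
    ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
]
for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)  # move the value under its new key

assert "backbone.norm0.weight" not in state_dict
assert state_dict["decode_head.classifier.bias"] == 2.0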
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowerCamelCase ( _a ): """simple docstring""" UpperCAmelCase_ : List[Any] ="sew-d" def __init__( self , UpperCAmelCase=32 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase=2 , UpperCAmelCase=512 , UpperCAmelCase=256 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=("p2c", "c2p") , UpperCAmelCase="layer_norm" , UpperCAmelCase="gelu_python" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-7 , UpperCAmelCase=1E-5 , UpperCAmelCase="group" , UpperCAmelCase="gelu" , UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase=False , UpperCAmelCase=128 , UpperCAmelCase=16 , UpperCAmelCase=True , UpperCAmelCase=0.05 , UpperCAmelCase=10 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=10 , UpperCAmelCase=0 , UpperCAmelCase="mean" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=256 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , **UpperCAmelCase , ) -> Dict: '''simple docstring''' super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) __snake_case : Any = hidden_size __snake_case : Union[str, Any] = feat_extract_norm __snake_case : Tuple = feat_extract_activation __snake_case : str = list(lowerCAmelCase_ ) __snake_case : List[Any] = list(lowerCAmelCase_ ) __snake_case : List[str] = list(lowerCAmelCase_ ) __snake_case : Dict = conv_bias __snake_case : Union[str, Any] = num_conv_pos_embeddings __snake_case : List[Any] = num_conv_pos_embedding_groups __snake_case : List[Any] = len(self.conv_dim ) __snake_case : Union[str, Any] = num_hidden_layers __snake_case : List[Any] = intermediate_size __snake_case : Any = squeeze_factor __snake_case : Union[str, Any] = max_position_embeddings __snake_case : Any = position_buckets __snake_case : Union[str, Any] = share_att_key __snake_case : List[str] = relative_attention __snake_case : List[str] = norm_rel_ebd __snake_case : str = list(lowerCAmelCase_ ) __snake_case : Union[str, Any] = hidden_act __snake_case : Optional[Any] = num_attention_heads __snake_case : Optional[int] = hidden_dropout __snake_case : int = attention_dropout __snake_case : Any = activation_dropout __snake_case : List[str] = feat_proj_dropout __snake_case : List[Any] = final_dropout __snake_case : List[str] = layer_norm_eps __snake_case : Dict = feature_layer_norm_eps __snake_case : Optional[Any] = initializer_range __snake_case : Optional[int] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __snake_case : Union[str, Any] = apply_spec_augment __snake_case : Tuple = mask_time_prob __snake_case : Optional[Any] = mask_time_length __snake_case : Dict = mask_time_min_masks __snake_case : Optional[int] = mask_feature_prob __snake_case : Any = mask_feature_length __snake_case : List[Any] = mask_feature_min_masks # ctc loss __snake_case : Optional[Any] = ctc_loss_reduction __snake_case : Union[str, Any] = ctc_zero_infinity # sequence classification __snake_case : int = use_weighted_layer_sum __snake_case : List[Any] = classifier_proj_size @property def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
243
'''simple docstring''' import qiskit def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' _a = qiskit.Aer.get_backend('''aer_simulator''' ) _a = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator _a = qiskit.execute(UpperCamelCase , UpperCamelCase , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(UpperCamelCase ) if __name__ == "__main__": _snake_case : Tuple = half_adder(1, 1) print(F'''Half Adder Output Qubit Counts: {counts}''')
22
0
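The qiskit circuit above realizes a classical half adder: the two CNOTs write the XOR of the inputs (the sum bit) onto qubit 2, and the Toffoli writes their AND (the carry bit) onto qubit 3. A plain-Python sketch of the truth table the measured counts should reproduce:

def classical_half_adder(bit_a, bit_b):
    """Return (sum_bit, carry_bit) for two input bits."""
    return bit_a ^ bit_b, bit_a & bit_b

for a in (0, 1):
    for b in (0, 1):
        print(f"{a} + {b} -> sum={a ^ b}, carry={a & b}")

assert classical_half_adder(1, 1) == (0, 1)  # 1 + 1 = binary 10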
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) snake_case = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
378
'''simple docstring''' from collections.abc import Generator from math import sin def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' if len(UpperCamelCase ) != 32: raise ValueError('''Input must be of length 32''' ) _a = B'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def snake_case_ (UpperCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('''Input must be non-negative''' ) _a = format(UpperCamelCase , '''08x''' )[-8:] _a = B'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' _a = B'''''' for char in message: bit_string += format(UpperCamelCase , '''08b''' ).encode('''utf-8''' ) _a = format(len(UpperCamelCase ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCamelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' if len(UpperCamelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(UpperCamelCase ) , 512 ): _a = bit_string[pos : pos + 512] _a = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def snake_case_ (UpperCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('''Input must be non-negative''' ) _a = format(UpperCamelCase , '''032b''' ) _a = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCamelCase , 2 ) def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' return (a + b) % 2**32 def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' _a = preprocess(UpperCamelCase ) _a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states _a = 0X67452301 _a = 0Xefcdab89 _a = 0X98badcfe _a = 0X10325476 _a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCamelCase ): _a = aa _a = ba _a = ca _a = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _a = d ^ (b & (c ^ d)) _a = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _a = c ^ (d & (b ^ c)) _a = (5 * i + 1) % 16 elif i <= 47: _a = b ^ c ^ d _a = (3 * i + 5) % 16 else: _a = c ^ (b | not_aa(UpperCamelCase )) _a = (7 * i) % 16 _a = (f + a + added_consts[i] + block_words[g]) % 2**32 _a = d _a = c _a = b _a = sum_aa(UpperCamelCase , left_rotate_aa(UpperCamelCase , shift_amounts[i] ) ) # Add hashed chunk to running total _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = reformat_hex(UpperCamelCase ) + 
reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
22
0
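The from-scratch MD5 above is easy to sanity-check against the standard library, since `hashlib` ships a reference implementation. A short harness, using the well-known digest of the empty message as a fixed point:

import hashlib

# Known MD5 test vector: the digest of the empty message.
assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"

def matches_reference(message, scratch_digest):
    """True when a hand-rolled hex digest agrees with hashlib's MD5."""
    return scratch_digest == hashlib.md5(message).hexdigest()

print(matches_reference(b"abc", hashlib.md5(b"abc").hexdigest()))  # True by construction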
"""simple docstring""" def lowercase (_snake_case ) -> str: '''simple docstring''' if len(_snake_case ) <= 1: return lst __UpperCamelCase = 1 while i < len(_snake_case ): if lst[i - 1] <= lst[i]: i += 1 else: __UpperCamelCase , __UpperCamelCase = lst[i], lst[i - 1] i -= 1 if i == 0: __UpperCamelCase = 1 return lst if __name__ == "__main__": _A = input("Enter numbers separated by a comma:\n").strip() _A = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
505
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A ( unittest.TestCase ): def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=18 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : Optional[int]=4_00 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , ) -> Optional[Any]: """simple docstring""" _a = size if size is not None else {'''height''': 18, '''width''': 18} _a = parent _a = batch_size _a = num_channels _a = image_size _a = min_resolution _a = max_resolution _a = do_resize _a = size _a = do_normalize def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4], [-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A ( _a ,unittest.TestCase ): lowercase_ = ImageGPTImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _a = ImageGPTImageProcessingTester(self ) @property def __lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) ) def __lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" _a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) _a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def __lowerCAmelCase ( self : str ) -> str: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) _a = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' ) image_processor_first.to_json_file(lowerCAmelCase_ ) _a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict() _a = image_processor_first.to_dict() for key, value in 
image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowerCAmelCase_ ) _a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict() _a = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCAmelCase_ ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" pass def snake_case_ (): '''simple docstring''' _a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) _a = Image.open(dataset[4]['''file'''] ) _a = Image.open(dataset[5]['''file'''] ) _a = [imagea, imagea] return images @require_vision @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) _a = prepare_images() # test non-batched _a = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 10_24) ) _a = [3_06, 1_91, 1_91] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ ) # test batched _a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 10_24) ) _a = [3_03, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
22
0
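Gnome sort, as implemented above, walks the list with a single index: step forward while adjacent items are ordered, otherwise swap them and step back. It is O(n^2) in the worst case but linear on already-sorted input. An equivalent compact variant with a quick property check:

import random

def gnome_sort(lst):
    """Compact gnome sort: advance while ordered, swap and retreat otherwise."""
    i = 1
    while i < len(lst):
        if i == 0 or lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
    return lst

data = [random.randint(0, 99) for _ in range(50)]
assert gnome_sort(data[:]) == sorted(data)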
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase ( _a ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""] __SCREAMING_SNAKE_CASE = """BlipImageProcessor""" __SCREAMING_SNAKE_CASE = ("""BertTokenizer""", """BertTokenizerFast""") def __init__(self , __a , __a ) -> List[str]: """simple docstring""" UpperCAmelCase__ = False super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase__ = self.image_processor def __call__(self , __a = None , __a = None , __a = True , __a = False , __a = None , __a = None , __a = 0 , __a = None , __a = None , __a = False , __a = False , __a = False , __a = False , __a = False , __a = True , __a = None , **__a , ) -> BatchEncoding: """simple docstring""" if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: UpperCAmelCase__ = self.tokenizer UpperCAmelCase__ = self.tokenizer( text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , ) return text_encoding # add pixel_values UpperCAmelCase__ = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) if text is not None: UpperCAmelCase__ = self.tokenizer( text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , ) else: UpperCAmelCase__ = None if text_encoding is not None: encoding_image_processor.update(lowerCAmelCase_ ) return encoding_image_processor def UpperCamelCase__ (self , *__a , **__a ) -> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCamelCase__ (self , *__a , **__a ) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @property def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.tokenizer.model_input_names UpperCAmelCase__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
146
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" _a = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _a = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _a = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _a = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _a = shift_tokens_right(lowerCAmelCase_ , model.config.pad_token_id , model.config.decoder_start_token_id ) _a = model(lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits _a = optax.softmax_cross_entropy(lowerCAmelCase_ , onehot(lowerCAmelCase_ , logits.shape[-1] ) ).mean() _a = -(labels.shape[-1] * loss.item()) _a = -8_4.9_1_2_7 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
22
0
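The Flax test above prepares decoder inputs with `shift_tokens_right`: labels are shifted one position to the right and the decoder start token is prepended, so the model learns to predict each token from its predecessor. A simplified NumPy sketch of that shift (ignoring the pad-token replacement the real helper also performs):

import numpy as np

def shift_tokens_right(labels, decoder_start_token_id):
    """Shift label ids one position right, inserting the start token at position 0."""
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted

labels = np.array([[11, 12, 13]])
print(shift_tokens_right(labels, 0))  # [[ 0 11 12]]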
from __future__ import annotations import bisect def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 0, _UpperCamelCase = -1 ) ->Dict: """simple docstring""" if hi < 0: lowercase : Optional[Any] = len(_UpperCamelCase ) while lo < hi: lowercase : Optional[Any] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lowercase : str = mid + 1 else: lowercase : Union[str, Any] = mid return lo def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 0, _UpperCamelCase = -1 ) ->List[str]: """simple docstring""" if hi < 0: lowercase : int = len(_UpperCamelCase ) while lo < hi: lowercase : str = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lowercase : Union[str, Any] = mid + 1 else: lowercase : Optional[int] = mid return lo def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 0, _UpperCamelCase = -1 ) ->str: """simple docstring""" sorted_collection.insert(bisect_left(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ), _UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 0, _UpperCamelCase = -1 ) ->Union[str, Any]: """simple docstring""" sorted_collection.insert(bisect_right(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ), _UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : Any = 0 lowercase : Any = len(_UpperCamelCase ) - 1 while left <= right: lowercase : List[str] = left + (right - left) // 2 lowercase : Any = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: lowercase : Optional[int] = midpoint - 1 else: lowercase : List[Any] = midpoint + 1 return None def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict: """simple docstring""" lowercase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase, _UpperCamelCase ) if index != len(_UpperCamelCase ) and sorted_collection[index] == item: return index return None def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Tuple: """simple docstring""" if right < left: return None lowercase : Tuple = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, midpoint - 1 ) else: return binary_search_by_recursion(_UpperCamelCase, _UpperCamelCase, midpoint + 1, _UpperCamelCase ) if __name__ == "__main__": __a = input('''Enter numbers separated by comma:\n''').strip() __a = sorted(int(item) for item in user_input.split(''',''')) __a = int(input('''Enter a single number to be found in the list:\n''')) __a = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
319
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _snake_case : Optional[Any] = 8 def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Dict=BITS ): '''simple docstring''' _a = x.device _a = (x * 255).int().clamp(0 , 255 ) _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase ) _a = rearrange(UpperCamelCase , '''d -> d 1 1''' ) _a = rearrange(UpperCamelCase , '''b c h w -> b c 1 h w''' ) _a = ((x & mask) != 0).float() _a = rearrange(UpperCamelCase , '''b c d h w -> b (c d) h w''' ) _a = bits * 2 - 1 return bits def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any=BITS ): '''simple docstring''' _a = x.device _a = (x > 0).int() _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa ) _a = rearrange(UpperCamelCase , '''d -> d 1 1''' ) _a = rearrange(UpperCamelCase , '''b (c d) h w -> b c d h w''' , d=8 ) _a = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' ) return (dec / 255).clamp(0.0 , 1.0 ) def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ): '''simple docstring''' if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _a = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _a = self.alphas_cumprod[timestep] _a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _a = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _a = self.bit_scale if self.config.clip_sample: _a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _a = self._get_variance(UpperCamelCase , UpperCamelCase ) _a = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu''' _a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase ) _a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise _a = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase ) def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ): '''simple docstring''' _a = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 ) else: _a = None # 1. compute alphas, betas _a = self.alphas_cumprod[t] _a = self.alphas_cumprod[t - 1] if t > 0 else self.one _a = 1 - alpha_prod_t _a = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _a = model_output else: raise ValueError(f'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" _a = self.bit_scale if self.config.clip_sample: _a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _a = 0 if t > 0: _a = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device ) _a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise _a = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase ) class A ( _a ): def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int: """simple docstring""" super().__init__() _a = bit_scale _a = ( ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) @torch.no_grad() def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" _a = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , ) _a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale _a = latents.to(self.device ) self.scheduler.set_timesteps(lowerCAmelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample # compute the previous noisy sample x_t -> x_t-1 _a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample _a = bits_to_decimal(lowerCAmelCase_ ) if output_type == "pil": _a = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_ )
22
0
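The hand-written search helpers above track the standard library's `bisect` module, which makes a convenient cross-check: `bisect_left` returns the first index where the item can be inserted while keeping the list sorted, `bisect_right` the last. For example:

import bisect

sorted_collection = [0, 5, 7, 10, 15, 15, 15, 20]

# First and last valid insertion points for a duplicated item.
assert bisect.bisect_left(sorted_collection, 15) == 4
assert bisect.bisect_right(sorted_collection, 15) == 7

# Membership test built on bisect_left, matching the scratch version above.
index = bisect.bisect_left(sorted_collection, 10)
assert index != len(sorted_collection) and sorted_collection[index] == 10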
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : """simple docstring""" def __init__( self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str]=1_3 , _lowerCAmelCase : List[Any]=7 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Optional[Any]=9_9 , _lowerCAmelCase : List[Any]=2_4 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : Tuple=3_7 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[Any]=5_1_2 , _lowerCAmelCase : Tuple=1_6 , _lowerCAmelCase : int=2 , _lowerCAmelCase : str=0.02 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Any=None , _lowerCAmelCase : int=1_0_0_0 , ) -> int: """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = scope snake_case_ = range_bbox def lowerCAmelCase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: snake_case_ = bbox[i, j, 3] snake_case_ = bbox[i, j, 1] snake_case_ = t if bbox[i, j, 2] < bbox[i, j, 0]: snake_case_ = bbox[i, j, 2] snake_case_ = bbox[i, j, 0] snake_case_ = t snake_case_ = None if self.use_input_mask: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , ) -> Optional[int]: """simple docstring""" snake_case_ = LiltModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() snake_case_ = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) snake_case_ = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ ) snake_case_ = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , ) -> Any: """simple docstring""" snake_case_ = self.num_labels snake_case_ = LiltForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() snake_case_ = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , ) -> Dict: """simple docstring""" snake_case_ = LiltForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() snake_case_ = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = config_and_inputs snake_case_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( _a , _a , _a , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) 
_SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = False def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" return True def lowerCAmelCase__ ( self : List[str] ) -> Any: """simple docstring""" snake_case_ = LiltModelTester(self ) snake_case_ = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 ) def lowerCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case_ = type self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def lowerCAmelCase__ ( self : str ) -> Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ ) def lowerCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ ) @slow def lowerCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = LiltModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @slow class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" snake_case_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(lowerCAmelCase_ ) snake_case_ = torch.tensor([[1, 2]] , device=lowerCAmelCase_ ) snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCAmelCase_ ) # forward pass with torch.no_grad(): snake_case_ = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ ) snake_case_ = torch.Size([1, 2, 7_6_8] ) snake_case_ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=lowerCAmelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCAmelCase_ , atol=1e-3 ) )
283
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Any = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class A ( _a ): lowercase_ = 'roformer' def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) _a = vocab_size _a = hidden_size if embedding_size is None else embedding_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = initializer_range _a = layer_norm_eps _a = rotary_value _a = use_cache class A ( _a ): @property def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} _a = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
22
0
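The LiLT model tester above repairs its randomly generated bounding boxes so every `(x0, y0, x1, y1)` satisfies `x0 <= x1` and `y0 <= y1`, the layout the model expects. The same normalization on a plain box, as a sketch:

def normalize_bbox(box):
    """Reorder coordinates so the box is (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1."""
    x0, y0, x1, y1 = box
    return [min(x0, x1), min(y0, y1), max(x0, x1), max(y0, y1)]

assert normalize_bbox([8, 9, 2, 3]) == [2, 3, 8, 9]
assert normalize_bbox([1, 2, 3, 4]) == [1, 2, 3, 4]  # already legal, unchanged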
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(_a ) class lowercase ( _a ): def __init__( self : Any , **_UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' super().__init__(**lowerCAmelCase_ ) requires_backends(self , "vision" ) requires_backends(self , "torch" ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) self.check_model_type(lowerCAmelCase_ ) def __snake_case( self : Tuple , **_UpperCamelCase : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = {} # preprocess args if "points_per_batch" in kwargs: SCREAMING_SNAKE_CASE = kwargs["points_per_batch"] if "points_per_crop" in kwargs: SCREAMING_SNAKE_CASE = kwargs["points_per_crop"] if "crops_n_layers" in kwargs: SCREAMING_SNAKE_CASE = kwargs["crops_n_layers"] if "crop_overlap_ratio" in kwargs: SCREAMING_SNAKE_CASE = kwargs["crop_overlap_ratio"] if "crop_n_points_downscale_factor" in kwargs: SCREAMING_SNAKE_CASE = kwargs["crop_n_points_downscale_factor"] # postprocess args if "pred_iou_thresh" in kwargs: SCREAMING_SNAKE_CASE = kwargs["pred_iou_thresh"] if "stability_score_offset" in kwargs: SCREAMING_SNAKE_CASE = kwargs["stability_score_offset"] if "mask_threshold" in kwargs: SCREAMING_SNAKE_CASE = kwargs["mask_threshold"] if "stability_score_thresh" in kwargs: SCREAMING_SNAKE_CASE = kwargs["stability_score_thresh"] if "crops_nms_thresh" in kwargs: SCREAMING_SNAKE_CASE = kwargs["crops_nms_thresh"] if "output_rle_mask" in kwargs: SCREAMING_SNAKE_CASE = kwargs["output_rle_mask"] if "output_bboxes_mask" in kwargs: SCREAMING_SNAKE_CASE = kwargs["output_bboxes_mask"] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Dict , _UpperCamelCase : Optional[Any] , *_UpperCamelCase : Optional[Any] , _UpperCamelCase : str=None , _UpperCamelCase : str=None , **_UpperCamelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' return super().__call__(lowerCAmelCase_ , *lowerCAmelCase_ , num_workers=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , **lowerCAmelCase_ ) def __snake_case( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any]=64 , _UpperCamelCase : int = 0 , _UpperCamelCase : float = 512 / 1_500 , _UpperCamelCase : Optional[int] = 32 , _UpperCamelCase : Optional[int] = 1 , ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = load_image(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = self.image_processor.size["longest_edge"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor.generate_crop_boxes( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = self.image_processor(images=lowerCAmelCase_ , return_tensors="pt" ) with self.device_placement(): if self.framework == "pt": SCREAMING_SNAKE_CASE = self.get_inference_context() with inference_context(): SCREAMING_SNAKE_CASE = self._ensure_tensor_on_device(lowerCAmelCase_ , device=self.device ) SCREAMING_SNAKE_CASE = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) 
) SCREAMING_SNAKE_CASE = image_embeddings SCREAMING_SNAKE_CASE = grid_points.shape[1] SCREAMING_SNAKE_CASE = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. " "To return all points at once, set points_per_batch to None" ) for i in range(0 , lowerCAmelCase_ , lowerCAmelCase_ ): SCREAMING_SNAKE_CASE = grid_points[:, i : i + points_per_batch, :, :] SCREAMING_SNAKE_CASE = input_labels[:, i : i + points_per_batch] SCREAMING_SNAKE_CASE = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def __snake_case( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=0.8_8 , _UpperCamelCase : Tuple=0.9_5 , _UpperCamelCase : str=0 , _UpperCamelCase : Optional[int]=1 , ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = model_inputs.pop("input_boxes" ) SCREAMING_SNAKE_CASE = model_inputs.pop("is_last" ) SCREAMING_SNAKE_CASE = model_inputs.pop("original_sizes" ).tolist() SCREAMING_SNAKE_CASE = model_inputs.pop("reshaped_input_sizes" ).tolist() SCREAMING_SNAKE_CASE = self.model(**lowerCAmelCase_ ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks SCREAMING_SNAKE_CASE = model_outputs["pred_masks"] SCREAMING_SNAKE_CASE = self.image_processor.post_process_masks( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , binarize=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = model_outputs["iou_scores"] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def __snake_case( self : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : List[Any]=0.7 , ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for model_output in model_outputs: all_scores.append(model_output.pop("iou_scores" ) ) all_masks.extend(model_output.pop("masks" ) ) all_boxes.append(model_output.pop("boxes" ) ) SCREAMING_SNAKE_CASE = torch.cat(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = torch.cat(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor.post_process_for_mask_generation( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = defaultdict(lowerCAmelCase_ ) for output in model_outputs: for k, v in output.items(): extra[k].append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = {} if output_rle_mask: SCREAMING_SNAKE_CASE = rle_mask if output_bboxes_mask: SCREAMING_SNAKE_CASE = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
403
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class A : lowercase_ = 42 lowercase_ = 42 class A : def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> str: """simple docstring""" _a = [[] for _ in range(lowerCAmelCase_ )] _a = size def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Iterator[Edge]: """simple docstring""" return iter(self._graph[vertex] ) @property def __lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" return self._size def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict: """simple docstring""" if weight not in (0, 1): raise ValueError('''Edge weight must be either 0 or 1.''' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('''Vertex indexes must be in [0; size).''' ) self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> int | None: """simple docstring""" _a = deque([start_vertex] ) _a = [None] * self.size _a = 0 while queue: _a = queue.popleft() _a = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: _a = current_distance + edge.weight _a = distances[edge.destination_vertex] if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and new_distance >= dest_vertex_distance ): continue _a = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('''No path from start_vertex to finish_vertex.''' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
22
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 A_ : Optional[Any] = data_utils.TransfoXLTokenizer A_ : int = data_utils.TransfoXLCorpus A_ : Union[str, Any] = data_utils A_ : List[Any] = data_utils def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> int: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(UpperCAmelCase__ , 'rb' ) as fp: UpperCamelCase_: str = pickle.load(UpperCAmelCase__ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) UpperCamelCase_: List[Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' ) UpperCamelCase_: int = corpus.vocab.__dict__ torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_: List[Any] = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase__ ) UpperCamelCase_: Optional[Any] = pytorch_dump_folder_path + '/' + CORPUS_NAME print(F'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(UpperCAmelCase__ , UpperCAmelCase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model UpperCamelCase_: str = os.path.abspath(UpperCAmelCase__ ) UpperCamelCase_: int = os.path.abspath(UpperCAmelCase__ ) print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": UpperCamelCase_: Dict = TransfoXLConfig() else: UpperCamelCase_: int = TransfoXLConfig.from_json_file(UpperCAmelCase__ ) print(F'''Building PyTorch model from configuration: {config}''' ) UpperCamelCase_: Optional[int] = TransfoXLLMHeadModel(UpperCAmelCase__ ) UpperCamelCase_: List[Any] = load_tf_weights_in_transfo_xl(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) # Save pytorch-model UpperCamelCase_: int = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) UpperCamelCase_: List[str] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) print(F'''Save PyTorch model to {os.path.abspath(UpperCAmelCase__ )}''' ) torch.save(model.state_dict() , UpperCAmelCase__ ) print(F'''Save configuration file to {os.path.abspath(UpperCAmelCase__ )}''' ) with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": A_ : int = argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the folder to store the PyTorch model or dataset/vocab.', ) parser.add_argument( '--tf_checkpoint_path', default='', type=str, help='An optional path to a TensorFlow checkpoint path to be converted.', ) parser.add_argument( '--transfo_xl_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained BERT model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--transfo_xl_dataset_file', default='', type=str, help='An optional dataset file to be converted in a vocabulary.', ) A_ : int = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
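The parser above implies invocations along these lines (the script filename follows the upstream repository's naming convention and the paths are placeholders):

# Convert a TensorFlow checkpoint:
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./transfo-xl/model.ckpt \
#       --transfo_xl_config_file ./transfo-xl/config.json \
#       --pytorch_dump_folder_path ./transfo-xl-pt
# Or convert a pre-processed corpus instead:
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --transfo_xl_dataset_file ./corpus-cache.pkl \
#       --pytorch_dump_folder_path ./transfo-xl-pt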
57
"""Gamma function for positive integers and half-integers."""

from math import pi, sqrt


def gamma(num: float) -> float:
    """Compute Γ(num) for num a positive integer or half-integer."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
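A few worked values for the recursion above, assuming `gamma` is in scope; they follow from Γ(n) = (n-1)! and Γ(1/2) = √π:

from math import isclose, pi, sqrt

assert gamma(5) == 24.0                        # Γ(5) = 4! = 24
assert isclose(gamma(3.5), 15 * sqrt(pi) / 8)  # Γ(7/2) = (5/2)·(3/2)·(1/2)·√π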
22
0
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : """simple docstring""" @staticmethod def snake_case ( *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' pass def __A (_SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" lowerCAmelCase__ :Optional[Any] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __magic_name__ :Any = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = DepthEstimationPipeline(model=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Any = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' ) self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , lowerCAmelCase_ ) import datasets lowerCAmelCase__ :Union[str, Any] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) lowerCAmelCase__ :Tuple = depth_estimator( [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] ) self.assertEqual( [ {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, ] , lowerCAmelCase_ , ) @require_tf @unittest.skip('Depth estimation is not implemented in TF' ) def snake_case ( self ): '''simple docstring''' pass @slow @require_torch def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = 'Intel/dpt-large' lowerCAmelCase__ :Optional[Any] = pipeline('depth-estimation' , model=lowerCAmelCase_ ) lowerCAmelCase__ :Union[str, Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' ) lowerCAmelCase__ :str = hashimage(outputs['depth'] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 2_9.3_0_4 ) self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.6_62 ) @require_torch def snake_case ( self ): '''simple docstring''' self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
93
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
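The same flow as the Karras-sigmas test above, written as a plain script (a CUDA device is assumed, as in the tests):

import torch
from diffusers import StableDiffusionKDiffusionPipeline

sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
sd_pipe = sd_pipe.to("cuda")
sd_pipe.set_scheduler("sample_dpmpp_2m")
image = sd_pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    num_inference_steps=15,
    use_karras_sigmas=True,
).images[0]
image.save("squirrel.png")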
22
0
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _UpperCamelCase : Optional[Any] =8 def lowerCamelCase_ ( A_ , A_=BITS ): __lowerCamelCase = x.device __lowerCamelCase = (x * 2_55).int().clamp(0 , 2_55 ) __lowerCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=A_ ) __lowerCamelCase = rearrange(A_ , '''d -> d 1 1''' ) __lowerCamelCase = rearrange(A_ , '''b c h w -> b c 1 h w''' ) __lowerCamelCase = ((x & mask) != 0).float() __lowerCamelCase = rearrange(A_ , '''b c d h w -> b (c d) h w''' ) __lowerCamelCase = bits * 2 - 1 return bits def lowerCamelCase_ ( A_ , A_=BITS ): __lowerCamelCase = x.device __lowerCamelCase = (x > 0).int() __lowerCamelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=A_ , dtype=torch.intaa ) __lowerCamelCase = rearrange(A_ , '''d -> d 1 1''' ) __lowerCamelCase = rearrange(A_ , '''b (c d) h w -> b c d h w''' , d=8 ) __lowerCamelCase = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' ) return (dec / 2_55).clamp(0.0 , 1.0 ) def lowerCamelCase_ ( self , A_ , A_ , A_ , A_ = 0.0 , A_ = True , A_=None , A_ = True , ): if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) __lowerCamelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas __lowerCamelCase = self.alphas_cumprod[timestep] __lowerCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod __lowerCamelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowerCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" __lowerCamelCase = self.bit_scale if self.config.clip_sample: __lowerCamelCase = torch.clamp(A_ , -scale , A_ ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) __lowerCamelCase = self._get_variance(A_ , A_ ) __lowerCamelCase = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide __lowerCamelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowerCamelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowerCamelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 __lowerCamelCase = model_output.device if torch.is_tensor(A_ ) else '''cpu''' __lowerCamelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=A_ ).to(A_ ) __lowerCamelCase = self._get_variance(A_ , A_ ) ** 0.5 * eta * noise __lowerCamelCase = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=A_ , pred_original_sample=A_ ) def lowerCamelCase_ ( self , A_ , A_ , A_ , A_="epsilon" , A_=None , A_ = True , ): __lowerCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: __lowerCamelCase , __lowerCamelCase = torch.split(A_ , sample.shape[1] , dim=1 ) else: __lowerCamelCase = None # 1. compute alphas, betas __lowerCamelCase = self.alphas_cumprod[t] __lowerCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one __lowerCamelCase = 1 - alpha_prod_t __lowerCamelCase = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": __lowerCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": __lowerCamelCase = model_output else: raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' ) # 3. Clip "predicted x_0" __lowerCamelCase = self.bit_scale if self.config.clip_sample: __lowerCamelCase = torch.clamp(A_ , -scale , A_ ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __lowerCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t __lowerCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __lowerCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise __lowerCamelCase = 0 if t > 0: __lowerCamelCase = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=A_ ).to(model_output.device ) __lowerCamelCase = (self._get_variance(A_ , predicted_variance=A_ ) ** 0.5) * noise __lowerCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=A_ , pred_original_sample=A_ ) class _SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self , _snake_case , _snake_case , _snake_case = 1.0 , ): """simple docstring""" super().__init__() __lowerCamelCase = bit_scale __lowerCamelCase = ( ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) @torch.no_grad() def __call__( self , _snake_case = 2_56 , _snake_case = 2_56 , _snake_case = 50 , _snake_case = None , _snake_case = 1 , _snake_case = "pil" , _snake_case = True , **_snake_case , ): """simple docstring""" __lowerCamelCase = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , ) __lowerCamelCase = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale __lowerCamelCase = latents.to(self.device ) self.scheduler.set_timesteps(lowerCAmelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual __lowerCamelCase = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample # compute the previous noisy sample x_t -> x_t-1 __lowerCamelCase = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample __lowerCamelCase = bits_to_decimal(lowerCAmelCase_ ) if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_ )
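Instantiating the pipeline class above takes a trained UNet plus either scheduler; a hedged sketch (the class name is mangled in this dump — upstream the community pipeline calls it `BitDiffusion` — and the checkpoint path is a placeholder):

from diffusers import DDPMScheduler, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # placeholder checkpoint
scheduler = DDPMScheduler(num_train_timesteps=1000)
pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)  # hypothetical de-mangled class name
image = pipe(height=256, width=256, num_inference_steps=50).images[0]
image.save("bit_diffusion.png")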
316
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' _snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' _snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def snake_case_ (UpperCamelCase : Tuple ): '''simple docstring''' def remove_articles(UpperCamelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase ) def white_space_fix(UpperCamelCase : Union[str, Any] ): return " ".join(text.split() ) def remove_punc(UpperCamelCase : str ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCamelCase : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) ) def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) ) def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' _a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )] return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100 def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(UpperCamelCase ) _a = Counter(UpperCamelCase ) _a = Counter() for sgram, scount in 
sgramcounter.items(): _a = scount * numref _a = Counter(UpperCamelCase ) _a = Counter() for cgram, ccount in cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = keeptmpscorea / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(UpperCamelCase ) > 0: _a = deltmpscorea / len(UpperCamelCase ) # ADDITION _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = set(UpperCamelCase ) & set(UpperCamelCase ) _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' _a = len(UpperCamelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ): '''simple docstring''' if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase ) else: 
_a = sentence if not return_str: _a = normalized_sent.split() return normalized_sent def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ): sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] ) _a = sari_score / len(UpperCamelCase ) return 100 * sari_score def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ): '''simple docstring''' _a = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(UpperCamelCase )] _a = sacrebleu.corpus_bleu( UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict: """simple docstring""" _a = {} result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) return result
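The metric's own docstring example exercises all three sub-scores and doubles as a smoke test:

import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # {'sari': 21.805..., 'sacrebleu': 14.535..., 'exact': 0.0}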
22
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
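With lazy loading in place, the tokenizer resolves on first import; a usage sketch (requires sentencepiece; the checkpoint is the usual upstream one):

from transformers import MLukeTokenizer

tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")
print(tokenizer.tokenize("Tokyo is the capital of Japan."))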
106
"""Helpers for converting image batches to lists of PIL images."""

import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
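A quick round trip through the numpy helper above (random data, purely illustrative):

import numpy as np

pil_images = numpy_to_pil(np.random.rand(2, 64, 64, 3))
print(len(pil_images), pil_images[0].size)  # 2 (64, 64)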
22
0
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
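The registered subcommands map to shell usage like the following (script arguments are illustrative):

# accelerate config                      # interactive machine configuration
# accelerate env                         # print environment info for bug reports
# accelerate launch train.py --lr 3e-4   # run a training script under the saved config
# accelerate test                        # sanity-check the configuration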
243
"""Send a message to a Slack channel via an incoming-webhook URL."""

import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
22
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() snake_case = logging.get_logger(__name__) def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : List[str] = SwinConfig( embed_dim=1_9_2 , depths=(2, 2, 1_8, 2) , num_heads=(6, 1_2, 2_4, 4_8) , window_size=1_2 , out_features=["stage2", "stage3", "stage4"] , ) lowerCAmelCase__ : Optional[Any] = DetaConfig( backbone_config=lowerCamelCase_ , num_queries=9_0_0 , encoder_ffn_dim=2_0_4_8 , decoder_ffn_dim=2_0_4_8 , num_feature_levels=5 , assign_first_stage=lowerCamelCase_ , with_box_refine=lowerCamelCase_ , two_stage=lowerCamelCase_ , ) # set labels lowerCAmelCase__ : List[Any] = "huggingface/label-files" if "o365" in model_name: lowerCAmelCase__ : str = 3_6_6 lowerCAmelCase__ : Tuple = "object365-id2label.json" else: lowerCAmelCase__ : Tuple = 9_1 lowerCAmelCase__ : Optional[int] = "coco-detection-id2label.json" lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[Any] = json.load(open(cached_download(hf_hub_url(lowerCamelCase_ , lowerCamelCase_ , repo_type="dataset" ) ) , "r" ) ) lowerCAmelCase__ : List[Any] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} lowerCAmelCase__ : Any = idalabel lowerCAmelCase__ : List[Any] = {v: k for k, v in idalabel.items()} return config def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Any = [] # stem # fmt: off rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") ) rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") ) rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) 
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") ) rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") ) rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") ) rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") ) rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") ) rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') ) 
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') ) 
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') ) # fmt: on return rename_keys def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = dct.pop(lowerCamelCase_ ) lowerCAmelCase__ : int = val def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): lowerCAmelCase__ : str = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) lowerCAmelCase__ : str = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' ) lowerCAmelCase__ : Union[str, Any] = state_dict.pop(f'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ : Union[str, Any] = in_proj_weight[:dim, :] lowerCAmelCase__ : Union[str, Any] = in_proj_bias[: dim] lowerCAmelCase__ : List[str] = in_proj_weight[ dim : dim * 2, : ] lowerCAmelCase__ : str = in_proj_bias[ dim : dim * 2 ] lowerCAmelCase__ : List[str] = in_proj_weight[ -dim :, : ] lowerCAmelCase__ : Optional[Any] = in_proj_bias[-dim :] # fmt: on def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Dict = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention lowerCAmelCase__ : Any = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) lowerCAmelCase__ : Union[str, Any] = state_dict.pop(f'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ : Any = in_proj_weight[:hidden_size, :] lowerCAmelCase__ : Tuple = in_proj_bias[:hidden_size] lowerCAmelCase__ : Optional[Any] = in_proj_weight[ hidden_size : hidden_size * 2, : ] lowerCAmelCase__ : List[Any] = in_proj_bias[hidden_size : hidden_size * 2] lowerCAmelCase__ : Dict = in_proj_weight[-hidden_size:, :] lowerCAmelCase__ : List[str] = in_proj_bias[-hidden_size:] def UpperCAmelCase_ ( ): """simple docstring""" lowerCAmelCase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase__ : str = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) return im @torch.no_grad() def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : int = get_deta_config(lowerCamelCase_ ) # load original state dict if model_name == "deta-swin-large": lowerCAmelCase__ : Optional[int] = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" ) elif model_name == "deta-swin-large-o365": lowerCAmelCase__ : Tuple = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" ) else: raise ValueError(f'''Model name {model_name} not supported''' ) lowerCAmelCase__ : str = torch.load(lowerCamelCase_ , map_location="cpu" )["model"] # original state dict for name, param in state_dict.items(): print(lowerCamelCase_ , param.shape ) # rename keys lowerCAmelCase__ : Optional[int] = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ ) read_in_swin_q_k_v(lowerCamelCase_ , config.backbone_config ) read_in_decoder_q_k_v(lowerCamelCase_ , lowerCamelCase_ ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: lowerCAmelCase__ : Optional[Any] = state_dict.pop(lowerCamelCase_ ) lowerCAmelCase__ : Optional[int] = val if "input_proj" in key: lowerCAmelCase__ : Optional[int] = state_dict.pop(lowerCamelCase_ ) lowerCAmelCase__ : str = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: lowerCAmelCase__ : List[str] = state_dict.pop(lowerCamelCase_ ) lowerCAmelCase__ : Optional[Any] = val # finally, create HuggingFace model and load state dict lowerCAmelCase__ : Tuple = DetaForObjectDetection(lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ ) model.eval() lowerCAmelCase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" model.to(lowerCamelCase_ ) # load image processor lowerCAmelCase__ : Dict = DetaImageProcessor(format="coco_detection" ) # verify our conversion on image lowerCAmelCase__ : Union[str, Any] = prepare_img() lowerCAmelCase__ : Optional[int] = processor(images=lowerCamelCase_ , return_tensors="pt" ) lowerCAmelCase__ : Dict = encoding["pixel_values"] lowerCAmelCase__ : Optional[Any] = model(pixel_values.to(lowerCamelCase_ ) ) # verify logits print("Logits:" , outputs.logits[0, :3, :3] ) print("Boxes:" , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": lowerCAmelCase__ : Any = torch.tensor( [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] ) lowerCAmelCase__ : str = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] ) elif model_name == "deta-swin-large-o365": lowerCAmelCase__ : Optional[int] = torch.tensor( [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] ) lowerCAmelCase__ : Dict = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(lowerCamelCase_ ) , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(lowerCamelCase_ ) , atol=1e-4 ) print("Everything ok!" ) if pytorch_dump_folder_path: # Save model and processor logger.info(f'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' ) Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ ) model.save_pretrained(lowerCamelCase_ ) processor.save_pretrained(lowerCamelCase_ ) # Push to hub if push_to_hub: print("Pushing model and processor to hub..." ) model.push_to_hub(f'''jozhang97/{model_name}''' ) processor.push_to_hub(f'''jozhang97/{model_name}''' ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument( """--model_name""", type=str, default="""deta-swin-large""", choices=["""deta-swin-large""", """deta-swin-large-o365"""], help="""Name of the model you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
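End to end, the conversion above runs as a CLI (script name assumed from the repository layout; paths are placeholders):

# python convert_deta_swin_to_pytorch.py \
#     --model_name deta-swin-large \
#     --pytorch_dump_folder_path ./deta-swin-large \
#     --push_to_hub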
378
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _snake_case : Tuple = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''shortest_edge''': 2_56} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any: """simple docstring""" _a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase_ ): _a = target_sizes.numpy() _a = [] for idx in range(len(lowerCAmelCase_ ) ): _a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ ) _a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: _a = logits.argmax(dim=1 ) _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
22
0
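A minimal numpy-only sketch of the resize → center-crop → rescale → normalize pipeline the image processor above implements; the nearest-neighbour resize stands in for the bilinear/bicubic resampling used in the library, and the helper name `preprocess_image` is illustrative, not part of the API.

import numpy as np

def preprocess_image(image, shortest_edge=256, crop=224, mean=0.5, std=0.5):
    # Resize so the shorter side equals `shortest_edge`, keeping the aspect ratio.
    h, w = image.shape[:2]
    scale = shortest_edge / min(h, w)
    new_h, new_w = round(h * scale), round(w * scale)
    rows = (np.arange(new_h) * h / new_h).astype(int)  # nearest-neighbour sampling
    cols = (np.arange(new_w) * w / new_w).astype(int)
    image = image[rows][:, cols]
    # Center-crop to (crop, crop).
    top, left = (new_h - crop) // 2, (new_w - crop) // 2
    image = image[top:top + crop, left:left + crop]
    # Rescale to [0, 1], normalize, and move channels first (ChannelDimension.FIRST).
    image = image.astype(np.float32) / 255.0
    image = (image - mean) / std
    return image.transpose(2, 0, 1)

pixels = preprocess_image(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
print(pixels.shape)  # (3, 224, 224)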
"""simple docstring""" import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _A = logging.get_logger(__name__) _A = { 'microsoft/conditional-detr-resnet-50': ( 'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json' ), } class __UpperCAmelCase ( _a ): """simple docstring""" _snake_case : List[str] = 'conditional_detr' _snake_case : List[str] = ['past_key_values'] _snake_case : Optional[int] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Dict , A_ : str=True , A_ : Optional[int]=None , A_ : List[str]=3 , A_ : List[str]=3_00 , A_ : Tuple=6 , A_ : Dict=20_48 , A_ : int=8 , A_ : Union[str, Any]=6 , A_ : Optional[int]=20_48 , A_ : Tuple=8 , A_ : List[str]=0.0 , A_ : Dict=0.0 , A_ : int=True , A_ : Tuple="relu" , A_ : List[str]=2_56 , A_ : List[Any]=0.1 , A_ : str=0.0 , A_ : Tuple=0.0 , A_ : Optional[int]=0.02 , A_ : List[Any]=1.0 , A_ : Union[str, Any]=False , A_ : int="sine" , A_ : Optional[Any]="resnet50" , A_ : Union[str, Any]=True , A_ : List[str]=False , A_ : Dict=2 , A_ : Any=5 , A_ : Dict=2 , A_ : List[Any]=1 , A_ : str=1 , A_ : List[Any]=2 , A_ : str=5 , A_ : Optional[Any]=2 , A_ : int=0.25 , **A_ : Optional[Any] , )-> List[str]: if backbone_config is not None and use_timm_backbone: raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) __UpperCamelCase = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __UpperCamelCase = backbone_config.get("model_type" ) __UpperCamelCase = CONFIG_MAPPING[backbone_model_type] __UpperCamelCase = config_class.from_dict(lowerCAmelCase_ ) __UpperCamelCase = use_timm_backbone __UpperCamelCase = backbone_config __UpperCamelCase = num_channels __UpperCamelCase = num_queries __UpperCamelCase = d_model __UpperCamelCase = encoder_ffn_dim __UpperCamelCase = encoder_layers __UpperCamelCase = encoder_attention_heads __UpperCamelCase = decoder_ffn_dim __UpperCamelCase = decoder_layers __UpperCamelCase = decoder_attention_heads __UpperCamelCase = dropout __UpperCamelCase = attention_dropout __UpperCamelCase = activation_dropout __UpperCamelCase = activation_function __UpperCamelCase = init_std __UpperCamelCase = init_xavier_std __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = encoder_layers __UpperCamelCase = auxiliary_loss __UpperCamelCase = position_embedding_type __UpperCamelCase = backbone __UpperCamelCase = use_pretrained_backbone __UpperCamelCase = dilation # Hungarian matcher __UpperCamelCase = class_cost __UpperCamelCase = bbox_cost __UpperCamelCase = giou_cost # Loss coefficients __UpperCamelCase = mask_loss_coefficient __UpperCamelCase = dice_loss_coefficient __UpperCamelCase = cls_loss_coefficient __UpperCamelCase = bbox_loss_coefficient __UpperCamelCase = giou_loss_coefficient __UpperCamelCase = focal_alpha super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def A ( self : List[Any] )-> int: return self.encoder_attention_heads @property def A ( self : Tuple )-> int: return self.d_model def A ( self : Tuple )-> int: __UpperCamelCase = 
copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __UpperCamelCase = self.backbone_config.to_dict() __UpperCamelCase = self.__class__.model_type return output class __UpperCAmelCase ( _a ): """simple docstring""" _snake_case : Dict = version.parse('1.11' ) @property def A ( self : int )-> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def A ( self : str )-> float: return 1e-5 @property def A ( self : str )-> int: return 12
505
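The `to_dict` override in the config above follows a common pattern: deep-copy the attribute dict, serialize any nested backbone config recursively, and record the model type. A minimal stand-alone re-implementation of that pattern (class and attribute names are illustrative):

import copy

class TinyConfig:
    model_type = "tiny-detr"

    def __init__(self, d_model=256, backbone_config=None):
        self.d_model = d_model
        self.backbone_config = backbone_config

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            # Nested configs must be inlined as plain dicts to stay JSON-serializable.
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.model_type
        return output

print(TinyConfig().to_dict())
# {'d_model': 256, 'backbone_config': None, 'model_type': 'tiny-detr'}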
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ): '''simple docstring''' _a = {} if train_file is not None: _a = [train_file] if eval_file is not None: _a = [eval_file] if test_file is not None: _a = [test_file] _a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase ) _a = list(ds[list(files.keys() )[0]].features.keys() ) _a = features_name.pop(UpperCamelCase ) _a = list(set(ds[list(files.keys() )[0]][label_name] ) ) _a = {label: i for i, label in enumerate(UpperCamelCase )} _a = tokenizer.model_input_names _a = {} if len(UpperCamelCase ) == 1: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , ) elif len(UpperCamelCase ) == 2: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid _snake_case : str = logging.getLogger(__name__) @dataclass class A : lowercase_ = field(metadata={'help': 'Which column contains the label'} ) lowercase_ = field(default=_a 
,metadata={'help': 'The path of the training file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} ) lowercase_ = field( default=128 ,metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } ,) lowercase_ = field( default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A : lowercase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowercase_ = field( default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,) def snake_case_ (): '''simple docstring''' _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _a , _a , _a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _a , _a , _a , _a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict: _a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _a = TFTrainer( model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _a = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _a = trainer.evaluate() _a = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(UpperCamelCase ) return results if __name__ == "__main__": main()
22
0
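A minimal sketch of the `tf.data.Dataset.from_generator` pattern the script builds for each split, assuming TensorFlow 2.x; the `output_signature` argument is the modern equivalent of the separate dtype/shape tuples used above, and the toy token ids are made up.

import tensorflow as tf

def gen():
    # Each example is (features dict, label), mirroring gen_train/gen_val/gen_test.
    yield {"input_ids": [101, 2023, 102]}, 1
    yield {"input_ids": [101, 7592, 102]}, 0

ds = tf.data.Dataset.from_generator(
    gen,
    output_signature=(
        {"input_ids": tf.TensorSpec(shape=(None,), dtype=tf.int32)},
        tf.TensorSpec(shape=(), dtype=tf.int32),
    ),
)
# Same cardinality assertion the script applies after building each split.
ds = ds.apply(tf.data.experimental.assert_cardinality(2))
for features, label in ds:
    print(features["input_ids"].numpy(), int(label))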
import re import subprocess import sys _UpperCamelCase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') _UpperCamelCase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split() _UpperCamelCase = '|'.join(sys.argv[1:]) _UpperCamelCase = re.compile(RF"""^({joined_dirs}).*?\.py$""") _UpperCamelCase = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
146
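The script above reduces to a regex filter over `git diff --name-only` output. The same filtering logic in isolation, with made-up file names:

import re

modified_files = ["src/models/bert.py", "docs/index.md", "tests/test_bert.py"]
joined_dirs = "|".join(["src", "tests"])  # the directories passed on argv
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
print(" ".join(f for f in modified_files if regex.match(f)))
# src/models/bert.py tests/test_bert.py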
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( _a ,unittest.TestCase ): lowercase_ = LEDTokenizer lowercase_ = LEDTokenizerFast lowercase_ = True def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" super().setUp() _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def __lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , lowerCAmelCase_ ) self.assertIn('''attention_mask''' , lowerCAmelCase_ ) self.assertNotIn('''labels''' , lowerCAmelCase_ ) 
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _a = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = ['''A long paragraph for summarization.'''] _a = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' ) _a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' ) _a = inputs['''input_ids'''] _a = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = ['''Summary of the text.''', '''Another summary.'''] _a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']] _a = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = '''A, <mask> AllenNLP sentence.''' _a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
22
0
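A minimal usage sketch of the tokenizer under test above, assuming the `allenai/led-base-16384` checkpoint is downloadable and a transformers release recent enough to support `text_target`:

from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
batch = tok(
    ["A long paragraph for summarization."],
    text_target=["Summary of the text."],
    padding=True,
    return_tensors="pt",
)
print(batch.input_ids.shape, batch.labels.shape)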
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = 'The Nymphenburg Palace is a beautiful palace in Munich!' def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Tuple: """simple docstring""" lowercase : Optional[Any] = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 1024, '''hidden_size''': 768, '''max_length''': 512, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 1024, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1e-5, '''token_type_vocab_size''': 2, } lowercase : int = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowercase : Optional[Any] = BERTEncoder( attention_cell=predefined_args['''attention_cell'''], num_layers=predefined_args['''num_layers'''], units=predefined_args['''units'''], hidden_size=predefined_args['''hidden_size'''], max_length=predefined_args['''max_length'''], num_heads=predefined_args['''num_heads'''], scaled=predefined_args['''scaled'''], dropout=predefined_args['''dropout'''], output_attention=_UpperCamelCase, output_all_encodings=_UpperCamelCase, use_residual=predefined_args['''use_residual'''], activation=predefined_args.get('''activation''', '''gelu''' ), layer_norm_eps=predefined_args.get('''layer_norm_eps''', _UpperCamelCase ), ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowercase : Any = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab lowercase : Tuple = os.path.join(get_home_dir(), '''models''' ) lowercase : Optional[int] = _load_vocab(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, cls=_UpperCamelCase ) lowercase : List[Any] = nlp.model.BERTModel( _UpperCamelCase, len(_UpperCamelCase ), units=predefined_args['''units'''], embed_size=predefined_args['''embed_size'''], embed_dropout=predefined_args['''embed_dropout'''], word_embed=predefined_args['''word_embed'''], use_pooler=_UpperCamelCase, use_token_type_embed=_UpperCamelCase, token_type_vocab_size=predefined_args['''token_type_vocab_size'''], use_classifier=_UpperCamelCase, use_decoder=_UpperCamelCase, ) original_bort.load_parameters(_UpperCamelCase, cast_dtype=_UpperCamelCase, ignore_extra=_UpperCamelCase ) lowercase : Optional[Any] = original_bort._collect_params_with_prefix() # Build our config 🤗 lowercase : List[Any] = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': 
predefined_args['''embed_size'''], '''initializer_range''': 0.0_2, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa '''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(_UpperCamelCase ), } lowercase : List[str] = BertConfig.from_dict(_UpperCamelCase ) lowercase : str = BertForMaskedLM(_UpperCamelCase ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(_UpperCamelCase ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(_UpperCamelCase, _UpperCamelCase ): lowercase : Optional[int] = hf_param.shape lowercase : Tuple = to_torch(params[gluon_param] ) lowercase : Dict = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param lowercase : List[str] = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight, '''word_embed.0.weight''' ) lowercase : Optional[Any] = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight, '''encoder.position_weight''' ) lowercase : Optional[int] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias, '''encoder.layer_norm.beta''' ) lowercase : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight, '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowercase : List[str] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowercase : List[str] = hf_bort_model.bert.encoder.layer[i] # self attention lowercase : str = layer.attention.self lowercase : List[str] = check_and_map_params( self_attn.key.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowercase : str = check_and_map_params( self_attn.key.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowercase : int = check_and_map_params( self_attn.query.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowercase : Any = check_and_map_params( self_attn.query.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowercase : Optional[int] = check_and_map_params( self_attn.value.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowercase : Optional[int] = check_and_map_params( self_attn.value.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowercase : Tuple = layer.attention.output lowercase : List[Any] = check_and_map_params( self_output.dense.bias, f"""encoder.transformer_cells.{i}.proj.bias""" ) lowercase : List[Any] = check_and_map_params( self_output.dense.weight, f"""encoder.transformer_cells.{i}.proj.weight""" ) lowercase : List[Any] = check_and_map_params( self_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowercase : Any = check_and_map_params( self_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowercase : Tuple = layer.intermediate lowercase : List[Any] = check_and_map_params( intermediate.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowercase : List[Any] = check_and_map_params( intermediate.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowercase : str = layer.output lowercase : Optional[Any] = check_and_map_params( bert_output.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowercase : List[Any] = check_and_map_params( bert_output.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowercase : str = check_and_map_params( bert_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowercase : Optional[int] = check_and_map_params( bert_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowercase : List[Any] = RobertaTokenizer.from_pretrained('''roberta-base''' ) lowercase : Any = tokenizer.encode_plus(_UpperCamelCase )['''input_ids'''] # Get gluon output lowercase : Tuple = mx.nd.array([input_ids] ) lowercase : List[Any] = original_bort(inputs=_UpperCamelCase, token_types=[] ) # Get Transformer output (save and reload 
model again) hf_bort_model.save_pretrained(_UpperCamelCase ) lowercase : List[Any] = BertModel.from_pretrained(_UpperCamelCase ) hf_bort_model.eval() lowercase : Tuple = tokenizer.encode_plus(_UpperCamelCase, return_tensors='''pt''' ) lowercase : Union[str, Any] = hf_bort_model(**_UpperCamelCase )[0] lowercase : List[Any] = output_gluon[0].asnumpy() lowercase : int = output_hf[0].detach().numpy() lowercase : List[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowercase : str = np.allclose(_UpperCamelCase, _UpperCamelCase, atol=1e-3 ) if success: print('''✔️ Both models output the same tensors''' ) else: print('''❌ Both models do **NOT** output the same tensors''' ) print('''Absolute difference is:''', _UpperCamelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __a = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
319
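The conversion above hinges on a shape-checked copy for every weight before it is adopted into the PyTorch model. A stand-alone sketch of that guard in plain PyTorch (the function name is illustrative):

import torch
from torch import nn

def checked_copy(hf_param: nn.Parameter, source: torch.Tensor) -> nn.Parameter:
    # Refuse silently-broadcasting copies: shapes must match exactly.
    assert hf_param.shape == source.shape, (
        f"got {tuple(source.shape)}, expected {tuple(hf_param.shape)}"
    )
    return nn.Parameter(source.clone())

layer = nn.Linear(4, 4)
layer.weight = checked_copy(layer.weight, torch.zeros(4, 4))
print(layer.weight.sum().item())  # 0.0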
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def snake_case_ (UpperCamelCase : SplitDict ): '''simple docstring''' _a = split_dict._to_yaml_list() assert len(UpperCamelCase ) == len(UpperCamelCase ) _a = SplitDict._from_yaml_list(UpperCamelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump _a = None # the split name of split_dict takes over the name of the split info object _a = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] ) def snake_case_ (UpperCamelCase : List[str] ): '''simple docstring''' _a = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
22
0
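The round-trip the test above exercises, run directly; `_to_yaml_list`/`_from_yaml_list` are the same private helpers the test calls, so this assumes a `datasets` version matching the test suite.

from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
reloaded = SplitDict._from_yaml_list(splits._to_yaml_list())
print(reloaded["train"].num_examples)  # 42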
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class __lowerCAmelCase ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 'Wav2Vec2FeatureExtractor' _SCREAMING_SNAKE_CASE = 'AutoTokenizer' def __init__( self : Tuple , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) snake_case_ = self.feature_extractor snake_case_ = False @classmethod def lowerCAmelCase__ ( cls : Optional[Any] , _lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" try: return super().from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) except OSError: warnings.warn( F'''Loading a tokenizer inside {cls.__name__} from a config that does not''' " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , lowerCAmelCase_ , ) snake_case_ = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) snake_case_ = WavaVecaCTCTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) return cls(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) def __call__( self : str , *_lowerCAmelCase : Union[str, Any] , **_lowerCAmelCase : Any ) -> Optional[int]: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) snake_case_ = kwargs.pop("raw_speech" ) else: snake_case_ = kwargs.pop("audio" , lowerCAmelCase_ ) snake_case_ = kwargs.pop("sampling_rate" , lowerCAmelCase_ ) snake_case_ = kwargs.pop("text" , lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: snake_case_ = args[0] snake_case_ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if audio is not None: snake_case_ = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None: snake_case_ = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_ ) if text is None: return inputs elif audio is None: return encodings else: snake_case_ = encodings["input_ids"] return inputs def lowerCAmelCase__ ( self : str , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : Any ) -> Any: """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*lowerCAmelCase_ , **lowerCAmelCase_ ) snake_case_ = kwargs.pop("input_features" , lowerCAmelCase_ ) snake_case_ = kwargs.pop("labels" , lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: snake_case_ = args[0] snake_case_ = args[1:] if input_features is not None: snake_case_ = self.feature_extractor.pad(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) if labels is not None: snake_case_ = self.tokenizer.pad(lowerCAmelCase_ , **lowerCAmelCase_ ) if labels is None: return input_features elif input_features is None: return labels else: snake_case_ = labels["input_ids"] return input_features def lowerCAmelCase__ ( self : Optional[int] , *_lowerCAmelCase : List[str] , **_lowerCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase__ ( self : Tuple , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @contextmanager def lowerCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) snake_case_ = True snake_case_ = self.tokenizer yield snake_case_ = self.feature_extractor snake_case_ = False
283
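A minimal usage sketch of the processor's combined `__call__` defined above, assuming the `facebook/wav2vec2-base-960h` checkpoint is reachable; one call produces both the audio features and the tokenized labels, matching the audio/text branch shown in the code.

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=speech, sampling_rate=16_000, text="HELLO", return_tensors="pt")
print(inputs.input_values.shape, inputs.labels.shape)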
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black _snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" _a = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _a = self.diffusers_dir shutil.copy( os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]: """simple docstring""" _a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: _a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result _a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) _a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ ) _a = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f: f.write(lowerCAmelCase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ ) with open(lowerCAmelCase_ , '''r''' ) as f: self.assertTrue(f.read() , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , ) # Copy consistency with a 
really long name _a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
22
0
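The consistency check above reformats candidate code with black before comparing it to the reference. The same two black calls in isolation, assuming the `black` package is installed:

import black

code = "def f(x):\n    return   x+1\n"
mode = black.Mode(line_length=119)
print(black.format_str(code, mode=mode), end="")
# def f(x):
#     return x + 1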
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase ( nn.Module ): lowercase__ : List[Any] = 42 lowercase__ : int = 42 lowercase__ : Any = 0.0 lowercase__ : Optional[Any] = 1 lowercase__ : Any = 1 lowercase__ : Tuple = True lowercase__ : Union[str, Any] = False lowercase__ : Union[str, Any] = False lowercase__ : str = False lowercase__ : str = jnp.floataa def __snake_case( self : Optional[int] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = resnets SCREAMING_SNAKE_CASE = attentions if self.add_downsample: SCREAMING_SNAKE_CASE = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : str=True ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = () for resnet, attn in zip(self.resnets , self.attentions ): SCREAMING_SNAKE_CASE = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE = self.downsamplers_a(lowerCAmelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowercase ( nn.Module ): lowercase__ : str = 42 lowercase__ : Optional[int] = 42 lowercase__ : Optional[Any] = 0.0 lowercase__ : List[Any] = 1 lowercase__ : int = True lowercase__ : List[Any] = jnp.floataa def __snake_case( self : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=lowerCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = resnets if self.add_downsample: SCREAMING_SNAKE_CASE = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=True ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = () for resnet in self.resnets: SCREAMING_SNAKE_CASE = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE = self.downsamplers_a(lowerCAmelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class lowercase ( nn.Module ): lowercase__ : List[Any] = 42 lowercase__ : Any = 42 lowercase__ : Optional[Any] = 42 lowercase__ : List[Any] = 0.0 lowercase__ : List[str] 
= 1 lowercase__ : Any = 1 lowercase__ : int = True lowercase__ : Tuple = False lowercase__ : Dict = False lowercase__ : Any = False lowercase__ : Any = jnp.floataa def __snake_case( self : Optional[int] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = resnets SCREAMING_SNAKE_CASE = attentions if self.add_upsample: SCREAMING_SNAKE_CASE = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int]=True ) -> int: '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states SCREAMING_SNAKE_CASE = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) SCREAMING_SNAKE_CASE = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) if self.add_upsample: SCREAMING_SNAKE_CASE = self.upsamplers_a(lowerCAmelCase_ ) return hidden_states class lowercase ( nn.Module ): lowercase__ : Dict = 42 lowercase__ : Union[str, Any] = 42 lowercase__ : Optional[Any] = 42 lowercase__ : Tuple = 0.0 lowercase__ : Union[str, Any] = 1 lowercase__ : str = True lowercase__ : int = jnp.floataa def __snake_case( self : Optional[int] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = resnets if self.add_upsample: SCREAMING_SNAKE_CASE = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[int]=True ) -> Optional[Any]: '''simple docstring''' for resnet in self.resnets: # pop res hidden states SCREAMING_SNAKE_CASE = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) SCREAMING_SNAKE_CASE = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) if self.add_upsample: SCREAMING_SNAKE_CASE = self.upsamplers_a(lowerCAmelCase_ 
) return hidden_states class lowercase ( nn.Module ): lowercase__ : Dict = 42 lowercase__ : int = 0.0 lowercase__ : Union[str, Any] = 1 lowercase__ : Union[str, Any] = 1 lowercase__ : int = False lowercase__ : Optional[Any] = False lowercase__ : Dict = jnp.floataa def __snake_case( self : Any ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] SCREAMING_SNAKE_CASE = [] for _ in range(self.num_layers ): SCREAMING_SNAKE_CASE = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = resnets SCREAMING_SNAKE_CASE = attentions def __call__( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=True ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.resnets[0](lowerCAmelCase_ , lowerCAmelCase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): SCREAMING_SNAKE_CASE = attn(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE = resnet(lowerCAmelCase_ , lowerCAmelCase_ , deterministic=lowerCAmelCase_ ) return hidden_states
403
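The up-blocks above pop the most recent residual off `res_hidden_states_tuple` and concatenate it on the channel axis (the last axis in Flax's NHWC layout) before each resnet. The core of that skip-connection step, with illustrative shapes:

import jax.numpy as jnp

hidden_states = jnp.ones((1, 8, 8, 64))
res_hidden_states_tuple = (jnp.ones((1, 8, 8, 32)), jnp.ones((1, 8, 8, 64)))

res_hidden_states = res_hidden_states_tuple[-1]       # pop the latest skip tensor
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 8, 8, 128)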
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _snake_case : Tuple = logging.get_logger(__name__) _snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case : List[Any] = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } _snake_case : Union[str, Any] = { 'squeezebert/squeezebert-uncased': 512, 'squeezebert/squeezebert-mnli': 512, 'squeezebert/squeezebert-mnli-headless': 512, } _snake_case : Tuple = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class A ( _a ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = SqueezeBertTokenizer def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int: """simple docstring""" super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , ) _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars ): _a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) ) _a = do_lower_case _a = strip_accents _a = tokenize_chinese_chars _a = normalizer_class(**lowerCAmelCase_ ) _a = do_lower_case def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]: """simple docstring""" _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = 
[self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" _a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
22
0
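The token-type-id logic in the tokenizer above is the standard BERT scheme: segment 0 covers `[CLS] A [SEP]`, segment 1 covers `B [SEP]`. A minimal re-implementation with illustrative special-token ids:

def token_type_ids(ids_a, ids_b=None, cls_id=101, sep_id=102):
    first = [cls_id] + ids_a + [sep_id]
    if ids_b is None:
        return [0] * len(first)
    return [0] * len(first) + [1] * (len(ids_b) + 1)

print(token_type_ids([7, 8]))       # [0, 0, 0, 0]
print(token_type_ids([7, 8], [9]))  # [0, 0, 0, 0, 1, 1]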
from ...configuration_utils import PretrainedConfig from ...utils import logging A_ : Tuple = logging.get_logger(__name__) A_ : Union[str, Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _lowerCAmelCase( _a ): """simple docstring""" a : Optional[int] ='''megatron-bert''' def __init__( self , _lowerCamelCase=2_9_0_5_6 , _lowerCamelCase=1_0_2_4 , _lowerCamelCase=2_4 , _lowerCamelCase=1_6 , _lowerCamelCase=4_0_9_6 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=1e-12 , _lowerCamelCase=0 , _lowerCamelCase="absolute" , _lowerCamelCase=True , **_lowerCamelCase , ): super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCamelCase_: List[str] = vocab_size UpperCamelCase_: Optional[int] = hidden_size UpperCamelCase_: Any = num_hidden_layers UpperCamelCase_: str = num_attention_heads UpperCamelCase_: Union[str, Any] = hidden_act UpperCamelCase_: str = intermediate_size UpperCamelCase_: Any = hidden_dropout_prob UpperCamelCase_: Optional[Any] = attention_probs_dropout_prob UpperCamelCase_: List[Any] = max_position_embeddings UpperCamelCase_: str = type_vocab_size UpperCamelCase_: Any = initializer_range UpperCamelCase_: List[str] = layer_norm_eps UpperCamelCase_: int = position_embedding_type UpperCamelCase_: Optional[int] = use_cache
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _snake_case : Dict = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = do_rescale _a = do_normalize _a = do_center_crop _a = crop_size _a = size _a = resample _a = rescale_factor _a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "shortest_edge" in size: _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _a = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = do_rescale if do_rescale is not None else self.do_rescale _a = do_normalize if do_normalize is not None else self.do_normalize _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ ) if not is_batched(lowerCAmelCase_ ): _a = [images] if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
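# The processor above implements the standard resize -> center-crop -> rescale ->
# normalize pipeline with the ImageNet defaults. A self-contained numpy-only
# sketch of the rescale + normalize steps (resize and crop omitted), assuming an
# HWC uint8 input:
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
# rescale to [0, 1], then normalize per channel with IMAGENET_DEFAULT_MEAN/STD
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
pixel_values = (image.astype(np.float32) / 255.0 - mean) / std
# channels-first layout expected by the model: (3, H, W)
pixel_values = pixel_values.transpose(2, 0, 1)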
"""simple docstring""" from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]: """simple docstring""" lowerCAmelCase__ :int = [] for part_id in partition_order: lowerCAmelCase__ :Dict = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect() for row_idx, row in enumerate(_SCREAMING_SNAKE_CASE ): expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def __A () ->int: """simple docstring""" lowerCAmelCase__ :Tuple = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase__ :Union[str, Any] = spark.range(100 ).repartition(1 ) lowerCAmelCase__ :int = Spark(_SCREAMING_SNAKE_CASE ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def __A () ->Dict: """simple docstring""" lowerCAmelCase__ :Tuple = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase__ :Dict = spark.range(10 ).repartition(2 ) lowerCAmelCase__ :Union[str, Any] = [1, 0] lowerCAmelCase__ :int = _generate_iterable_examples(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Reverse the partitions. lowerCAmelCase__ :Dict = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for i, (row_id, row_dict) in enumerate(generate_fn() ): lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __A () ->str: """simple docstring""" lowerCAmelCase__ :Dict = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase__ :int = spark.range(10 ).repartition(1 ) lowerCAmelCase__ :Any = SparkExamplesIterable(_SCREAMING_SNAKE_CASE ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ): assert row_id == F"0_{i}" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def __A () ->Optional[Any]: """simple docstring""" lowerCAmelCase__ :str = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase__ :Tuple = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('numpy.random.Generator' ) as generator_mock: lowerCAmelCase__ :Optional[int] = lambda _SCREAMING_SNAKE_CASE : x.reverse() lowerCAmelCase__ :Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , [2, 1, 0] ) lowerCAmelCase__ :Any = SparkExamplesIterable(_SCREAMING_SNAKE_CASE ).shuffle_data_sources(_SCREAMING_SNAKE_CASE ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __A () ->Optional[int]: """simple docstring""" lowerCAmelCase__ :Dict = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase__ :str = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 lowerCAmelCase__ :Union[str, Any] = SparkExamplesIterable(_SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase__ :Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , [0, 2] ) for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 lowerCAmelCase__ :Optional[int] = SparkExamplesIterable(_SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 lowerCAmelCase__ :Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(_SCREAMING_SNAKE_CASE , [1, 3] ) for i, (row_id, row_dict) in enumerate(_SCREAMING_SNAKE_CASE ): lowerCAmelCase__ , lowerCAmelCase__ :List[str] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def __A () ->List[str]: """simple docstring""" lowerCAmelCase__ :List[str] = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate() lowerCAmelCase__ :List[Any] = spark.range(100 ).repartition(1 ) lowerCAmelCase__ :Union[str, Any] = Spark(_SCREAMING_SNAKE_CASE ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case : str = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[str] = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Optional[int] = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Tuple = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[Any] = ['LayoutLMv3FeatureExtractor'] _snake_case : Tuple = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys _snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
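# The __init__ above defers heavy imports until first attribute access via
# _LazyModule. A minimal standard-library sketch of the same idea (module and
# attribute names here are placeholders):
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported names to their submodules only on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [exported names]} into {name: submodule}
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        return getattr(importlib.import_module(module_name), attr)


# e.g. demo = LazyModule("demo", {"json": ["dumps"]}); demo.dumps({"a": 1})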
def max_digit_removal(number: int) -> int:
    # NOTE: the original function name was lost to the renaming pass; the
    # descriptive name here is a stand-in. Returns the largest integer that can
    # be formed by deleting exactly one digit of abs(number). The renamed dump
    # popped and joined the wrong variables; the references below are restored.
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(number))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
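# Worked example for the function above (stand-in name, see the note inside):
# deleting one digit of 132 yields 32, 12 or 13, so the maximum is 32; the sign
# is stripped with abs(), so -132 gives the same answer.
assert max_digit_removal(132) == 32
assert max_digit_removal(-132) == 32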
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A ( _a ): lowercase_ = (DDPMParallelScheduler,) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]: """simple docstring""" _a = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowerCAmelCase_ ) return config def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , ) def __lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5 def __lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = self.dummy_sample_deter + 0.1 _a = self.dummy_sample_deter - 0.1 _a = samplea.shape[0] _a = torch.stack([samplea, samplea, samplea] , dim=0 ) _a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ ) _a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _a = 
self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config(prediction_type='''v_prediction''' ) _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def __lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) _a = scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase_ ): if i == len(lowerCAmelCase_ ) - 1: _a = -1 else: _a = timesteps[i + 1] _a = scheduler.previous_timestep(lowerCAmelCase_ ) _a = prev_t.item() self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] _a = len(lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
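# A minimal denoising-loop sketch for the scheduler under test (assuming
# `diffusers` is installed; the zero tensor is a stand-in for a real UNet call):
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample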
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase__ ( _a , unittest.TestCase ): A_ : Dict = KandinskyImgaImgPipeline A_ : Tuple = ['prompt', 'image_embeds', 'negative_image_embeds', 'image'] A_ : Union[str, Any] = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', ] A_ : List[Any] = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] A_ : Dict = False @property def __UpperCamelCase ( self : Optional[int] ) -> Tuple: return 32 @property def __UpperCamelCase ( self : Optional[Any] ) -> str: return 32 @property def __UpperCamelCase ( self : Dict ) -> Optional[int]: return self.time_input_dim @property def __UpperCamelCase ( self : Optional[Any] ) -> str: return self.time_input_dim * 4 @property def __UpperCamelCase ( self : int ) -> Optional[Any]: return 100 @property def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: A = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: torch.manual_seed(0 ) A = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) A = MultilingualCLIP(lowerCAmelCase_ ) A = text_encoder.eval() return text_encoder @property def __UpperCamelCase ( self : List[str] ) -> Tuple: torch.manual_seed(0 ) A = { 'in_channels': 4, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } A = UNetaDConditionModel(**lowerCAmelCase_ ) return model @property def __UpperCamelCase ( self : Optional[Any] ) -> int: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase ( self : Any ) -> str: torch.manual_seed(0 ) A = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase ( self : int ) 
-> str: A = self.dummy_text_encoder A = self.dummy_tokenizer A = self.dummy_unet A = self.dummy_movq A = { 'num_train_timesteps': 1_000, 'beta_schedule': 'linear', 'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'clip_sample': False, 'set_alpha_to_one': False, 'steps_offset': 0, 'prediction_type': 'epsilon', 'thresholding': False, } A = DDIMScheduler(**lowerCAmelCase_ ) A = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any]=0 ) -> List[str]: A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCAmelCase_ ) # create init_image A = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) A = image.cpu().permute(0 , 2 , 3 , 1 )[0] A = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' ).resize((256, 256) ) if str(lowerCAmelCase_ ).startswith('mps' ): A = torch.manual_seed(lowerCAmelCase_ ) else: A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) A = { 'prompt': 'horse', 'image': init_image, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 10, 'guidance_scale': 7.0, 'strength': 0.2, 'output_type': 'np', } return inputs def __UpperCamelCase ( self : Optional[int] ) -> List[str]: A = 'cpu' A = self.get_dummy_components() A = self.pipeline_class(**lowerCAmelCase_ ) A = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) A = pipe(**self.get_dummy_inputs(lowerCAmelCase_ ) ) A = output.images A = pipe( **self.get_dummy_inputs(lowerCAmelCase_ ) , return_dict=lowerCAmelCase_ , )[0] A = image[0, -3:, -3:, -1] A = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any] ) -> Any: super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : Tuple ) -> Optional[int]: A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_img2img_frog.npy' ) A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) A = 'A red cartoon frog, 4k' A = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa ) pipe_prior.to(lowerCAmelCase_ ) A = KandinskyImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa ) A = pipeline.to(lowerCAmelCase_ ) pipeline.set_progress_bar_config(disable=lowerCAmelCase_ ) A = torch.Generator(device='cpu' ).manual_seed(0 ) A , A = pipe_prior( lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=5 , 
negative_prompt='' , ).to_tuple() A = pipeline( lowerCAmelCase_ , image=lowerCAmelCase_ , image_embeds=lowerCAmelCase_ , negative_image_embeds=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , ) A = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the sklearn dataset bundle into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # NOTE: the renaming pass had collapsed every local to `_a` and every
    # argument to an undefined `UpperCamelCase`; the data flow is restored here.
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : str = 1 __snake_case : int = 3 __snake_case : List[str] = (32, 32) __snake_case : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase_ ) return image @property def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def UpperCAmelCase ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(lowerCAmelCase_ ) @property def UpperCAmelCase ( self ) -> str: '''simple docstring''' def extract(*UpperCAmelCase , **UpperCAmelCase ): class _lowerCamelCase : """simple docstring""" def __init__( self ) -> int: '''simple docstring''' __snake_case : Tuple = torch.ones([0] ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' self.pixel_values.to(lowerCAmelCase_ ) return self return Out() return extract def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' __snake_case : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case : Optional[Any] = self.dummy_cond_unet __snake_case : List[str] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , ) __snake_case : Dict = self.dummy_vae __snake_case : Optional[Any] = self.dummy_text_encoder __snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk __snake_case : str = StableDiffusionPipeline( unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , ) __snake_case : Union[str, Any] = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) __snake_case : Optional[Any] = "A 
painting of a squirrel eating a burger" __snake_case : str = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 ) __snake_case : str = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) __snake_case : Any = output.images __snake_case : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 ) __snake_case : int = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCAmelCase_ , )[0] __snake_case : List[str] = image[0, -3:, -3:, -1] __snake_case : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : Tuple = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator __snake_case : List[str] = self.dummy_cond_unet __snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ ) __snake_case : Any = self.dummy_vae __snake_case : Optional[int] = self.dummy_text_encoder __snake_case : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk __snake_case : List[str] = StableDiffusionPipeline( unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , ) __snake_case : Any = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) __snake_case : Union[str, Any] = "A painting of a squirrel eating a burger" __snake_case : Any = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 ) __snake_case : Dict = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) __snake_case : str = output.images __snake_case : Tuple = torch.Generator(device=lowerCAmelCase_ ).manual_seed(0 ) __snake_case : Optional[Any] = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCAmelCase_ , )[0] __snake_case : Union[str, Any] = image[0, -3:, -3:, -1] __snake_case : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : str = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=lowerCAmelCase_ ) assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) assert isinstance(pipe.scheduler , lowerCAmelCase_ ) assert pipe.safety_checker is None __snake_case : Optional[int] = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCAmelCase_ ) __snake_case : int = 
StableDiffusionPipeline.from_pretrained(lowerCAmelCase_ ) # sanity check that the pipeline still works assert pipe.safety_checker is None __snake_case : Optional[int] = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.dummy_cond_unet __snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ ) __snake_case : Tuple = self.dummy_vae __snake_case : Any = self.dummy_text_encoder __snake_case : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 __snake_case : str = unet.half() __snake_case : Union[str, Any] = vae.half() __snake_case : List[Any] = bert.half() # make sure here that pndm scheduler skips prk __snake_case : List[str] = StableDiffusionPipeline( unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , feature_extractor=self.dummy_extractor , ) __snake_case : List[str] = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) __snake_case : Union[str, Any] = "A painting of a squirrel eating a burger" __snake_case : Dict = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' __snake_case : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=lowerCAmelCase_ ) __snake_case : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __snake_case : Optional[Any] = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) __snake_case : Optional[Any] = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) __snake_case : str = 4003660346 __snake_case : Tuple = 7 # without safety guidance (sld_guidance_scale = 0) __snake_case : Any = torch.manual_seed(lowerCAmelCase_ ) __snake_case : int = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) __snake_case : Dict = output.images __snake_case : Tuple = image[0, -3:, -3:, -1] __snake_case : str = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) __snake_case : str = torch.manual_seed(lowerCAmelCase_ ) __snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __snake_case : Union[str, Any] = output.images __snake_case : 
Optional[int] = image[0, -3:, -3:, -1] __snake_case : Optional[int] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=lowerCAmelCase_ ) __snake_case : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __snake_case : Optional[Any] = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) __snake_case : List[str] = "padme amidala taking a bath artwork, safe for work, no nudity" __snake_case : List[str] = 2734971755 __snake_case : Tuple = 7 __snake_case : str = torch.manual_seed(lowerCAmelCase_ ) __snake_case : Optional[int] = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) __snake_case : Any = output.images __snake_case : Union[str, Any] = image[0, -3:, -3:, -1] __snake_case : Dict = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 __snake_case : Optional[int] = torch.manual_seed(lowerCAmelCase_ ) __snake_case : List[str] = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __snake_case : str = output.images __snake_case : List[str] = image[0, -3:, -3:, -1] __snake_case : Tuple = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) __snake_case : Tuple = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) __snake_case : int = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." 
" leyendecker" ) __snake_case : Dict = 1044355234 __snake_case : int = 12 __snake_case : List[str] = torch.manual_seed(lowerCAmelCase_ ) __snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) __snake_case : str = output.images __snake_case : int = image[0, -3:, -3:, -1] __snake_case : Union[str, Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 __snake_case : List[Any] = torch.manual_seed(lowerCAmelCase_ ) __snake_case : Tuple = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=lowerCAmelCase_ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __snake_case : Optional[int] = output.images __snake_case : Dict = image[0, -3:, -3:, -1] __snake_case : int = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    # NOTE: both parameters had been renamed to the same identifier and the body
    # referenced an undefined `bita`; the two input bits are restored below.
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
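# Exhaustive truth-table check for the circuit above. The circuit is
# deterministic (no superposition is created), so all 1000 shots should land on
# a single bitstring; qiskit prints classical bit 0 (XOR / sum) rightmost and
# bit 1 (AND / carry) leftmost.
if __name__ == "__main__":
    for b0 in (0, 1):
        for b1 in (0, 1):
            counts = half_adder(b0, b1)
            expected = f"{b0 & b1}{b0 ^ b1}"
            assert counts == {expected: 1000}, (b0, b1, counts)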
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to the given number of significant digits via the
    Chudnovsky series. NOTE: the renamed dump assigned the precision to a throwaway
    variable instead of setting the Decimal context, and garbled the loop
    variables; both are restored here."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
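# Sanity check against the well-known expansion 3.14159265358979...; the routine
# drops the last computed digit to absorb rounding error, so precision=10 yields
# 8 decimal places.
assert pi(10) == "3.14159265"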
'''simple docstring''' from collections.abc import Generator from math import sin def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' if len(UpperCamelCase ) != 32: raise ValueError('''Input must be of length 32''' ) _a = B'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def snake_case_ (UpperCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('''Input must be non-negative''' ) _a = format(UpperCamelCase , '''08x''' )[-8:] _a = B'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' _a = B'''''' for char in message: bit_string += format(UpperCamelCase , '''08b''' ).encode('''utf-8''' ) _a = format(len(UpperCamelCase ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(UpperCamelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' if len(UpperCamelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(UpperCamelCase ) , 512 ): _a = bit_string[pos : pos + 512] _a = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def snake_case_ (UpperCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('''Input must be non-negative''' ) _a = format(UpperCamelCase , '''032b''' ) _a = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(UpperCamelCase , 2 ) def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' return (a + b) % 2**32 def snake_case_ (UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def snake_case_ (UpperCamelCase : bytes ): '''simple docstring''' _a = preprocess(UpperCamelCase ) _a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states _a = 0X67452301 _a = 0Xefcdab89 _a = 0X98badcfe _a = 0X10325476 _a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(UpperCamelCase ): _a = aa _a = ba _a = ca _a = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _a = d ^ (b & (c ^ d)) _a = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _a = c ^ (d & (b ^ c)) _a = (5 * i + 1) % 16 elif i <= 47: _a = b ^ c ^ d _a = (3 * i + 5) % 16 else: _a = c ^ (b | not_aa(UpperCamelCase )) _a = (7 * i) % 16 _a = (f + a + added_consts[i] + block_words[g]) % 2**32 _a = d _a = c _a = b _a = sum_aa(UpperCamelCase , left_rotate_aa(UpperCamelCase , shift_amounts[i] ) ) # Add hashed chunk to running total _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = sum_aa(UpperCamelCase , UpperCamelCase ) _a = reformat_hex(UpperCamelCase ) + 
reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) + reformat_hex(UpperCamelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
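# The implementation above was mangled by the renaming pass: every helper ended
# up named `snake_case_`, so the internal calls (`not_aa`, `sum_aa`,
# `left_rotate_aa`) dangle. In the upstream source the entry point appears to be
# `md5_me`, returning a bytes hex digest. A sketch of the cross-check one would
# run once the names are restored, using only the standard library as an oracle:
import hashlib


def check_against_hashlib(md5_impl, message: bytes) -> None:
    # hashlib returns a str hexdigest; the reference implementation returns bytes
    expected = hashlib.md5(message).hexdigest().encode("utf-8")
    actual = md5_impl(message)
    assert actual == expected, (actual, expected)


# e.g. check_against_hashlib(md5_me, b"The quick brown fox jumps over the lazy dog")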
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = {} if train_file is not None: __UpperCamelCase = [train_file] if eval_file is not None: __UpperCamelCase = [eval_file] if test_file is not None: __UpperCamelCase = [test_file] __UpperCamelCase = datasets.load_dataset("csv" ,data_files=_snake_case ) __UpperCamelCase = list(ds[list(files.keys() )[0]].features.keys() ) __UpperCamelCase = features_name.pop(_snake_case ) __UpperCamelCase = list(set(ds[list(files.keys() )[0]][label_name] ) ) __UpperCamelCase = {label: i for i, label in enumerate(_snake_case )} __UpperCamelCase = tokenizer.model_input_names __UpperCamelCase = {} if len(_snake_case ) == 1: for k in files.keys(): __UpperCamelCase = ds[k].map( lambda _snake_case : tokenizer.batch_encode_plus( example[features_name[0]] ,truncation=_snake_case ,max_length=_snake_case ,padding="max_length" ) ,batched=_snake_case ,) elif len(_snake_case ) == 2: for k in files.keys(): __UpperCamelCase = ds[k].map( lambda _snake_case : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) ,truncation=_snake_case ,max_length=_snake_case ,padding="max_length" ,) ,batched=_snake_case ,) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __UpperCamelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCamelCase = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __UpperCamelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCamelCase = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __UpperCamelCase = {k: v for k, v in ex.items() if k in input_names} __UpperCamelCase = labelaid[ex[label_name]] yield (d, label) __UpperCamelCase = ( tf.data.Dataset.from_generator( _snake_case ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __UpperCamelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __UpperCamelCase = ( tf.data.Dataset.from_generator( _snake_case ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __UpperCamelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __UpperCamelCase = ( tf.data.Dataset.from_generator( _snake_case ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __UpperCamelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, 
labelaid _A = logging.getLogger(__name__) @dataclass class __UpperCAmelCase : """simple docstring""" _snake_case : Optional[Any] = field(metadata={'help': 'Which column contains the label'} ) _snake_case : Union[str, Any] = field(default=_a , metadata={'help': 'The path of the training file'} ) _snake_case : str = field(default=_a , metadata={'help': 'The path of the development file'} ) _snake_case : int = field(default=_a , metadata={'help': 'The path of the test file'} ) _snake_case : List[str] = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _snake_case : Optional[int] = field( default=_a , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class __UpperCAmelCase : """simple docstring""" _snake_case : List[str] = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _snake_case : List[str] = field( default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _snake_case : Any = field( default=_a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _snake_case : Any = field(default=_a , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _snake_case : List[Any] = field( default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def lowercase () -> List[Any]: '''simple docstring''' __UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,) logger.info( f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ f"""16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__UpperCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_tfds( train_file=data_args.train_file ,eval_file=data_args.dev_file ,test_file=data_args.test_file ,tokenizer=_snake_case ,label_column_id=data_args.label_column_id ,max_seq_length=data_args.max_seq_length ,) __UpperCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=len(_snake_case ) ,labelaid=_snake_case ,idalabel={id: label for label, id in labelaid.items()} ,finetuning_task="text-classification" ,cache_dir=model_args.cache_dir ,) with training_args.strategy.scope(): __UpperCamelCase = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path ,from_pt=bool(".bin" in model_args.model_name_or_path ) ,config=_snake_case ,cache_dir=model_args.cache_dir ,) def compute_metrics(_snake_case ) -> Dict: __UpperCamelCase = np.argmax(p.predictions ,axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __UpperCamelCase = TFTrainer( model=_snake_case ,args=_snake_case ,train_dataset=_snake_case ,eval_dataset=_snake_case ,compute_metrics=_snake_case ,) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __UpperCamelCase = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) __UpperCamelCase = trainer.evaluate() __UpperCamelCase = os.path.join(training_args.output_dir ,"eval_results.txt" ) with open(_snake_case ,"w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f""" {key} = {value}""" ) writer.write(f"""{key} = {value}\n""" ) results.update(_snake_case ) return results if __name__ == "__main__": main()
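# A quick, self-contained sanity check of the label-mapping and metric logic
# used above (a sketch with dummy data, not part of the original script; note
# that get_tfds builds label2id from an unsorted set, so ids are not stable
# across runs -- here we sort for determinism):
import numpy as np

labels = ["neg", "pos", "pos", "neg"]
label2id = {label: i for i, label in enumerate(sorted(set(labels)))}
label_ids = np.array([label2id[label] for label in labels])

# Fake logits for 4 examples and 2 classes; argmax recovers the class id.
predictions = np.array([[2.0, 0.1], [0.2, 1.5], [0.0, 3.0], [1.0, 0.5]])
preds = np.argmax(predictions, axis=1)

# Same accuracy computation as compute_metrics above.
print({"acc": (preds == label_ids).mean()})  # {'acc': 1.0}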
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]

    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
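# The integration test above exercises ImageGPT's core preprocessing trick:
# every normalized pixel is snapped to its nearest color cluster, and the
# cluster index becomes the token id. A minimal numpy sketch of that
# nearest-cluster assignment (illustrative only, not the library code; the two
# toy clusters mirror the ones in the tester):
import numpy as np

clusters = np.asarray(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)

# Four "pixels", each an RGB triple in [-1, 1].
pixels = np.array([[0.9, 0.6, 0.4], [-0.5, 0.0, 0.5], [0.8, 0.7, 0.3], [-0.7, -0.1, 0.6]])

# Squared distance from every pixel to every cluster, then argmin -> token ids.
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = distances.argmin(axis=1)
print(input_ids)  # [0 1 0 1]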
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
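# The retain-grad test above relies on a general PyTorch pattern: non-leaf
# tensors drop their gradients after backward() unless retain_grad() is called
# first. A self-contained miniature with a toy two-layer network (purely
# illustrative; nothing Mask2Former-specific):
import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
x = torch.randn(2, 4)

hidden = model[0](x)  # intermediate (non-leaf) activation
hidden.retain_grad()  # without this, hidden.grad would stay None

out = model[2](model[1](hidden)).sum()
out.backward()

assert hidden.grad is not None
print(hidden.grad.shape)  # torch.Size([2, 8])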
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
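# The score checked above is the summed per-token negative log-likelihood:
# the mean cross-entropy is multiplied back by the label length. A tiny numpy
# sketch of that bookkeeping (toy logits, no Flax required):
import numpy as np

logits = np.array([[[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]]])  # (batch=1, seq=2, vocab=3)
labels = np.array([[0, 1]])

# log-softmax, then pick out the log-probability of each gold token
log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
token_nll = -np.take_along_axis(log_probs, labels[..., None], axis=-1).squeeze(-1)

mean_loss = token_nll.mean()
score = -(labels.shape[-1] * mean_loss)  # summed log-likelihood, as in the test
print(score)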
import base64


def base64_encode(string: str) -> bytes:
    """Encode the given string to base64 (UTF-8 text in, base64 bytes out)."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_bytes: bytes) -> str:
    """Decode the given base64 bytes back to the original UTF-8 string."""
    return base64.b64decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
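# A quick round-trip check, including non-ASCII input (a sketch; any UTF-8
# string should survive the encode/decode cycle unchanged):
import base64

for text in ["Hello World!", "naïve café", "日本語"]:
    encoded = base64.b64encode(text.encode("utf-8"))
    assert base64.b64decode(encoded).decode("utf-8") == text
print("round-trip ok")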
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


# convert to bit representations and back
def decimal_to_bits(x, bits=BITS):
    """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


# modified scheduler step function that clamps the predicted x_0 to the bit scale
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # monkey-patch the scheduler with the bit-aware step function
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
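# The bit codec above is invertible up to 8-bit quantization. A short usage
# sketch exercising the two helpers defined in this file (assumes torch and
# einops are installed; the tolerance covers the truncation in (x * 255).int()):
import torch

image = torch.rand(1, 3, 16, 16)

bits = decimal_to_bits(image)      # shape (1, 24, 16, 16), values in {-1.0, +1.0}
recovered = bits_to_decimal(bits)  # shape (1, 3, 16, 16), values in [0, 1]

assert torch.allclose(image, recovered, atol=1 / 255)
print(bits.shape, recovered.shape)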
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load datasets. Called after prepare data."""

        # We test on the dev set to compare to benchmarks without having to submit to the GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )

        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )

        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
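# The _eval_end aggregation above is easy to check in isolation: stack the
# per-batch losses, concatenate the logits, argmax for classification. A
# minimal sketch with fake batch outputs (dummy data, no GLUE task; the
# task-specific compute_metrics is replaced by plain accuracy):
import numpy as np
import torch

outputs = [
    {"val_loss": torch.tensor(0.4), "pred": np.array([[2.0, 0.1], [0.1, 1.0]]), "target": np.array([0, 1])},
    {"val_loss": torch.tensor(0.6), "pred": np.array([[0.3, 0.9]]), "target": np.array([1])},
]

val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().item()
preds = np.argmax(np.concatenate([x["pred"] for x in outputs], axis=0), axis=1)
targets = np.concatenate([x["target"] for x in outputs], axis=0)

print({"val_loss": val_loss_mean, "acc": (preds == targets).mean()})  # 0.5, 1.0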
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
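# The register_subcommand / set_defaults(func=...) wiring above is the standard
# argparse dispatch idiom: each subcommand stashes a callable in `func`, and the
# entry point simply invokes it. A self-contained toy version of the pattern:
from argparse import ArgumentParser

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers()

download = subparsers.add_parser("download")
download.add_argument("model", type=str)
download.add_argument("--force", action="store_true")
download.set_defaults(func=lambda args: print(f"would download {args.model} (force={args.force})"))

args = parser.parse_args(["download", "bert-base-uncased", "--force"])
args.func(args)  # -> would download bert-base-uncased (force=True)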
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: zero-weight edges are pushed to the front of the deque,
        one-weight edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
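# A short usage sketch of the 0-1 BFS above: zero-weight edges go to the front
# of the deque, so a vertex's distance is final the first time it is settled.
# The toy graph is purely illustrative and uses the classes defined above:
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
g.add_edge(2, 3, 0)

print(g.get_shortest_path(0, 3))  # 1 (e.g. 0 -0-> 1 -1-> 2 -0-> 3)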
"""Convert FocalNet checkpoints from the original repository."""

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
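# The key-renaming loop above uses a common conversion idiom: snapshot the
# keys, pop each entry, and reinsert it under its new name so the tensor is
# never copied. A self-contained miniature of the idiom (toy keys and rules,
# plain dict instead of a real state dict):
def rename(name: str) -> str:
    if name.startswith("patch_embed.proj"):
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if name.startswith("head"):
        name = name.replace("head", "classifier")
    return name


toy_state_dict = {"patch_embed.proj.weight": 1, "head.bias": 2}
for key in list(toy_state_dict.keys()):  # snapshot: we mutate while iterating
    toy_state_dict[rename(key)] = toy_state_dict.pop(key)

print(toy_state_dict)
# {'embeddings.patch_embeddings.projection.weight': 1, 'classifier.bias': 2}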
from math import pi, sqrt


def gamma(num: float) -> float:
    """Calculate the Gamma function for integer and half-integer arguments."""
    if num <= 0:
        raise ValueError("math domain error")

    if num > 171.5:
        raise OverflowError("math range error")

    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")

    elif num == 0.5:
        return sqrt(pi)

    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num == 0:
            break
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
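# As an extra sanity check, the closed forms above can be compared against the
# standard library's implementation (a sketch using the gamma() defined above;
# bit-exact equality is not guaranteed, hence the tolerance):
import math

for x in [0.5, 1, 1.5, 2, 4.5, 10]:
    assert math.isclose(gamma(x), math.gamma(x), rel_tol=1e-12), x
print("matches math.gamma")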
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch __A = logging.get_logger(__name__) class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :Any = ["""pixel_values"""] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_5_5 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**lowerCAmelCase_ ) lowerCAmelCase__ :Optional[int] = size if size is not None else {'shortest_edge': 2_5_6} lowerCAmelCase__ :Optional[int] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) lowerCAmelCase__ :Optional[int] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4} lowerCAmelCase__ :Optional[int] = get_size_dict(lowerCAmelCase_ , param_name='crop_size' ) lowerCAmelCase__ :str = do_resize lowerCAmelCase__ :str = size lowerCAmelCase__ :int = resample lowerCAmelCase__ :List[str] = do_center_crop lowerCAmelCase__ :List[str] = crop_size lowerCAmelCase__ :Optional[int] = do_rescale lowerCAmelCase__ :List[Any] = rescale_factor lowerCAmelCase__ :Optional[int] = do_normalize lowerCAmelCase__ :Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase__ :List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :List[str] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) lowerCAmelCase__ :Dict = get_resize_output_image_size(lowerCAmelCase_ , size=size['shortest_edge'] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}" ) return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ :Any = size if size is not None else self.size lowerCAmelCase__ :Optional[int] = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) lowerCAmelCase__ :int = resample if resample is not None else self.resample lowerCAmelCase__ :Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ :Union[str, Any] = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ :Union[str, Any] = get_size_dict(lowerCAmelCase_ , param_name='crop_size' ) lowerCAmelCase__ :Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ :List[Any] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ :Tuple = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ :Optional[Any] = image_std if image_std is not None else self.image_std lowerCAmelCase__ :List[Any] = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
lowerCAmelCase__ :int = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: lowerCAmelCase__ :int = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: lowerCAmelCase__ :List[str] = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: lowerCAmelCase__ :Optional[Any] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: lowerCAmelCase__ :Optional[Any] = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] lowerCAmelCase__ :List[str] = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] lowerCAmelCase__ :Union[str, Any] = {'pixel_values': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Tuple = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(lowerCAmelCase_ ): lowerCAmelCase__ :str = target_sizes.numpy() lowerCAmelCase__ :Union[str, Any] = [] for idx in range(len(lowerCAmelCase_ ) ): lowerCAmelCase__ :Tuple = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowerCAmelCase_ ) lowerCAmelCase__ :str = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: lowerCAmelCase__ :str = logits.argmax(dim=1 ) lowerCAmelCase__ :List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
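# To make the transform chain above concrete, here is a minimal numpy/PIL sketch
# (an addition, not part of the file; Pillow >= 9.1 is assumed for Image.Resampling)
# of what preprocess computes for one image with the defaults: shortest edge to 256,
# center crop to 224x224, rescale by 1/255, normalize with IMAGENET_STANDARD_MEAN/STD
# (both [0.5, 0.5, 0.5] in transformers), channels-first output.
import numpy as np
from PIL import Image

MEAN = np.array([0.5, 0.5, 0.5])  # IMAGENET_STANDARD_MEAN
STD = np.array([0.5, 0.5, 0.5])   # IMAGENET_STANDARD_STD

img = Image.new('RGB', (640, 480), color=(128, 64, 32))
scale = 256 / min(img.size)                                  # shortest edge -> 256
img = img.resize((round(img.width * scale), round(img.height * scale)), Image.Resampling.BILINEAR)
left, top = (img.width - 224) // 2, (img.height - 224) // 2
img = img.crop((left, top, left + 224, top + 224))           # center crop -> 224x224
arr = np.asarray(img) / 255.0                                # rescale_factor = 1/255
arr = (arr - MEAN) / STD                                     # normalize
arr = arr.transpose(2, 0, 1)                                 # ChannelDimension.FIRST
print(arr.shape)                                             # (3, 224, 224)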
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
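# The pattern the tests above exercise, pulled out as a standalone sketch
# (illustrative only; it needs a CUDA GPU and the k-diffusion extra installed).
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
pipe = pipe.to('cuda')
pipe.set_scheduler('sample_euler')  # select a k-diffusion sampler by name
generator = torch.manual_seed(0)
image = pipe(
    'A painting of a squirrel eating a burger',
    generator=generator,
    guidance_scale=9.0,
    num_inference_steps=20,
    output_type='np',
).images[0]
print(image.shape)  # (512, 512, 3)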
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'nvidia/segformer-b0-finetuned-ade-512-512': (
        'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = 'segformer'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if 'reshape_last_stage' in kwargs and kwargs['reshape_last_stage'] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
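# A short usage sketch for the two classes above (added for illustration; the
# printed values assume the default arguments from the reconstruction).
config = SegformerConfig()
print(config.model_type)    # 'segformer'
print(config.hidden_sizes)  # [32, 64, 160, 256]

onnx_config = SegformerOnnxConfig(config)
print(dict(onnx_config.inputs))        # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
print(onnx_config.default_onnx_opset)  # 12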
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' _snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' _snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def snake_case_ (UpperCamelCase : Tuple ): '''simple docstring''' def remove_articles(UpperCamelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase ) def white_space_fix(UpperCamelCase : Union[str, Any] ): return " ".join(text.split() ) def remove_punc(UpperCamelCase : str ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCamelCase : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) ) def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) ) def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' _a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )] return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100 def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(UpperCamelCase ) _a = Counter(UpperCamelCase ) _a = Counter() for sgram, scount in 
sgramcounter.items(): _a = scount * numref _a = Counter(UpperCamelCase ) _a = Counter() for cgram, ccount in cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = keeptmpscorea / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(UpperCamelCase ) > 0: _a = deltmpscorea / len(UpperCamelCase ) # ADDITION _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = set(UpperCamelCase ) & set(UpperCamelCase ) _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' _a = len(UpperCamelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ): '''simple docstring''' if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase ) else: 
_a = sentence if not return_str: _a = normalized_sent.split() return normalized_sent def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ): sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] ) _a = sari_score / len(UpperCamelCase ) return 100 * sari_score def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ): '''simple docstring''' _a = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(UpperCamelCase )] _a = sacrebleu.corpus_bleu( UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict: """simple docstring""" _a = {} result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) return result
import pytest


DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / 'datasets' / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f'{script_name}.py'
    with open(script_path, 'w') as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
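# A hypothetical test (not part of the original file, and assumed to live in the
# same module as the fixtures) showing how they compose: pytest builds the dummy
# dataset directory under tmp_path and hands back its path.
import os


def test_dummy_dataset_script_is_written(dataset_loading_script_dir):
    assert os.path.isdir(dataset_loading_script_dir)
    assert os.path.isfile(
        os.path.join(dataset_loading_script_dir, f'{DATASET_LOADING_SCRIPT_NAME}.py')
    )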
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
        'linear': PIL.Image.Resampling.BILINEAR,
        'bilinear': PIL.Image.Resampling.BILINEAR,
        'bicubic': PIL.Image.Resampling.BICUBIC,
        'lanczos': PIL.Image.Resampling.LANCZOS,
        'nearest': PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        'linear': PIL.Image.LINEAR,
        'bilinear': PIL.Image.BILINEAR,
        'bicubic': PIL.Image.BICUBIC,
        'lanczos': PIL.Image.LANCZOS,
        'nearest': PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    '''Convert a torch image batch with values in [-1, 1] to a list of PIL images.'''
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    '''Convert a numpy image batch with values in [0, 1] to a list of PIL images.'''
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        images = [Image.fromarray(image) for image in images]
    return images
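# A small usage sketch (an addition; assumes torch is installed): a diffusion-style
# output batch in [-1, 1] becomes a list of PIL images.
import torch

batch = torch.rand(2, 3, 64, 64) * 2 - 1    # NCHW values in [-1, 1]
pil_images = pt_to_pil(batch)
print(len(pil_images), pil_images[0].size)  # 2 (64, 64)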
import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib _UpperCamelCase = threading.Lock() _UpperCamelCase = None _UpperCamelCase = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } _UpperCamelCase = logging.WARNING _UpperCamelCase = True def lowerCAmelCase__( ) -> Tuple: __snake_case : Optional[Any] = os.getenv("TRANSFORMERS_VERBOSITY" , lowercase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ", ".join(log_levels.keys() ) }""" ) return _default_log_level def lowerCAmelCase__( ) -> Tuple: return __name__.split("." )[0] def lowerCAmelCase__( ) -> Dict: return logging.getLogger(_get_library_name() ) def lowerCAmelCase__( ) -> int: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return __snake_case : Tuple = logging.StreamHandler() # Set sys.stderr as stream. __snake_case : Any = sys.stderr.flush # Apply our default configuration to the library root logger. __snake_case : str = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) __snake_case : List[Any] = False def lowerCAmelCase__( ) -> List[str]: global _default_handler with _lock: if not _default_handler: return __snake_case : str = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) __snake_case : Optional[Any] = None def lowerCAmelCase__( ) -> List[Any]: return log_levels def lowerCAmelCase__( lowercase : Optional[str] = None ) -> List[str]: if name is None: __snake_case : Any = _get_library_name() _configure_library_root_logger() return logging.getLogger(lowercase ) def lowerCAmelCase__( ) -> str: _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def lowerCAmelCase__( lowercase : int ) -> str: _configure_library_root_logger() _get_library_root_logger().setLevel(lowercase ) def lowerCAmelCase__( ) -> List[Any]: return set_verbosity(lowercase ) def lowerCAmelCase__( ) -> List[Any]: return set_verbosity(lowercase ) def lowerCAmelCase__( ) -> Tuple: return set_verbosity(lowercase ) def lowerCAmelCase__( ) -> str: return set_verbosity(lowercase ) def lowerCAmelCase__( ) -> Optional[Any]: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def lowerCAmelCase__( ) -> str: _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def lowerCAmelCase__( lowercase : logging.Handler ) -> Tuple: _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(lowercase ) def lowerCAmelCase__( lowercase : logging.Handler ) -> Optional[Any]: _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(lowercase ) def lowerCAmelCase__( ) -> Any: _configure_library_root_logger() __snake_case : str = False def lowerCAmelCase__( ) 
-> Any: _configure_library_root_logger() __snake_case : Any = True def lowerCAmelCase__( ) -> Union[str, Any]: __snake_case : str = _get_library_root_logger().handlers for handler in handlers: __snake_case : Dict = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" ) handler.setFormatter(lowercase ) def lowerCAmelCase__( ) -> Dict: __snake_case : List[str] = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(lowercase ) def lowerCAmelCase__( self : List[str] , *lowercase : Optional[Any] , **lowercase : Optional[int] ) -> int: __snake_case : str = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , lowercase ) if no_advisory_warnings: return self.warning(*lowercase , **lowercase ) _UpperCamelCase = warning_advice @functools.lru_cache(lowercase ) def lowerCAmelCase__( self : List[str] , *lowercase : List[Any] , **lowercase : Union[str, Any] ) -> Dict: self.warning(*lowercase , **lowercase ) _UpperCamelCase = warning_once class _lowerCamelCase : """simple docstring""" def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> int: # pylint: disable=unused-argument '''simple docstring''' __snake_case : str = args[0] if args else None def __iter__( self ) -> Any: '''simple docstring''' return iter(self._iterator ) def __getattr__( self , UpperCAmelCase ) -> str: '''simple docstring''' def empty_fn(*UpperCAmelCase , **UpperCAmelCase ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ) -> int: '''simple docstring''' return self def __exit__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: '''simple docstring''' return class _lowerCamelCase : """simple docstring""" def __call__( self , *UpperCAmelCase , **UpperCAmelCase ) -> List[Any]: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) else: return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any: '''simple docstring''' __snake_case : List[str] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ ) def UpperCAmelCase ( self ) -> str: '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() _UpperCamelCase = _tqdm_cls() def lowerCAmelCase__( ) -> List[Any]: global _tqdm_active return bool(_tqdm_active ) def lowerCAmelCase__( ) -> Union[str, Any]: global _tqdm_active __snake_case : Tuple = True hf_hub_utils.enable_progress_bars() def lowerCAmelCase__( ) -> Tuple: global _tqdm_active __snake_case : Optional[Any] = False hf_hub_utils.disable_progress_bars()
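# The helpers above back the public transformers logging API; a usage sketch
# (added for illustration, written against the installed package).
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
logger.info('visible at INFO level')
logger.warning_once('emitted only once per message')  # backed by the lru_cache wrapper above
logging.disable_progress_bars()  # flips _tqdm_active and the hf_hub progress bars off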
'''simple docstring'''
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f'{response.status_code}, the response is:\n{response.text}'
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
'''simple docstring''' import argparse import copy def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Union[str, Any] = {} with open(lowerCamelCase_ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: lowerCAmelCase__ : int = [] _list.append([line.split()[1], line.split()[2]] ) lowerCAmelCase__ : List[Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: lowerCAmelCase__ : Union[str, Any] = [] _list.append([line.split()[0], line.split()[2]] ) lowerCAmelCase__ : Any = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: lowerCAmelCase__ : str = f.read(1 ) lowerCAmelCase__ : Dict = start_node lowerCAmelCase__ : Tuple = [] lowerCAmelCase__ : Optional[int] = start_node lowerCAmelCase__ : int = 0 while visiting not in first_solution: lowerCAmelCase__ : Dict = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(lowerCamelCase_ ) and k[0] not in first_solution: lowerCAmelCase__ : Optional[int] = k[1] lowerCAmelCase__ : Optional[int] = k[0] first_solution.append(lowerCamelCase_ ) lowerCAmelCase__ : Optional[int] = distance_of_first_solution + int(lowerCamelCase_ ) lowerCAmelCase__ : Tuple = best_node first_solution.append(lowerCamelCase_ ) lowerCAmelCase__ : List[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 lowerCAmelCase__ : int = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : Optional[int] = [] for n in solution[1:-1]: lowerCAmelCase__ : List[Any] = solution.index(lowerCamelCase_ ) for kn in solution[1:-1]: lowerCAmelCase__ : Optional[int] = solution.index(lowerCamelCase_ ) if n == kn: continue lowerCAmelCase__ : str = copy.deepcopy(lowerCamelCase_ ) lowerCAmelCase__ : List[str] = kn lowerCAmelCase__ : Optional[Any] = n lowerCAmelCase__ : int = 0 for k in _tmp[:-1]: lowerCAmelCase__ : Optional[Any] = _tmp[_tmp.index(lowerCamelCase_ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: lowerCAmelCase__ : Dict = distance + int(i[1] ) _tmp.append(lowerCamelCase_ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) lowerCAmelCase__ : Union[str, Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda lowerCamelCase_ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" lowerCAmelCase__ : List[Any] = 1 lowerCAmelCase__ : Optional[Any] = first_solution lowerCAmelCase__ : List[Any] = [] lowerCAmelCase__ : Union[str, Any] = distance_of_first_solution lowerCAmelCase__ : str = solution while count <= iters: lowerCAmelCase__ : Any = find_neighborhood(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ : Optional[int] = 0 lowerCAmelCase__ : Union[str, Any] = neighborhood[index_of_best_solution] lowerCAmelCase__ : Dict = len(lowerCamelCase_ ) - 1 lowerCAmelCase__ : List[str] = False while not found: lowerCAmelCase__ : Union[str, Any] = 0 while i < len(lowerCamelCase_ ): if best_solution[i] != 
solution[i]: lowerCAmelCase__ : List[Any] = best_solution[i] lowerCAmelCase__ : str = solution[i] break lowerCAmelCase__ : Tuple = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) lowerCAmelCase__ : Dict = True lowerCAmelCase__ : Any = best_solution[:-1] lowerCAmelCase__ : Optional[int] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: lowerCAmelCase__ : Optional[int] = cost lowerCAmelCase__ : Any = solution else: lowerCAmelCase__ : Any = index_of_best_solution + 1 lowerCAmelCase__ : Union[str, Any] = neighborhood[index_of_best_solution] if len(lowerCamelCase_ ) >= size: tabu_list.pop(0 ) lowerCAmelCase__ : Optional[int] = count + 1 return best_solution_ever, best_cost def UpperCAmelCase_ ( lowerCamelCase_=None ): """simple docstring""" lowerCAmelCase__ : Dict = generate_neighbours(args.File ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = generate_first_solution( args.File , lowerCamelCase_ ) lowerCAmelCase__ , lowerCAmelCase__ : str = tabu_search( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.Iterations , args.Size , ) print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' ) if __name__ == "__main__": snake_case = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
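# An illustrative sketch of the input the tabu-search script above expects (the
# file name, script name, and distances below are assumptions): a whitespace-
# separated, complete edge list where each line reads "node_a node_b distance",
# and the first character of the file names the start node.
edges = """a b 20
a c 18
a d 22
b c 10
b d 25
c d 12
"""
with open('tabu_test_data.txt', 'w') as f:
    f.write(edges)

# then, from the shell (script name assumed):
#   python tabu_search.py -f tabu_test_data.txt -i 100 -s 5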
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _snake_case : Tuple = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''shortest_edge''': 2_56} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any: """simple docstring""" _a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase_ ): _a = target_sizes.numpy() _a = [] for idx in range(len(lowerCAmelCase_ ) ): _a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ ) _a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: _a = logits.argmax(dim=1 ) _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
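# The semantic-segmentation post-processing step above, isolated as a runnable
# sketch (an addition; shapes are arbitrary): per-image logits are upsampled to
# each target size, then argmaxed over the label channel.
import torch

logits = torch.randn(2, 19, 56, 56)  # (batch, num_labels, h, w)
target_sizes = [(224, 224), (320, 480)]
semantic_maps = []
for idx in range(logits.shape[0]):
    resized = torch.nn.functional.interpolate(
        logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False
    )
    semantic_maps.append(resized[0].argmax(dim=0))  # one (H, W) label map per image
print(semantic_maps[0].shape, semantic_maps[1].shape)  # torch.Size([224, 224]) torch.Size([320, 480])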
"""simple docstring""" def lowercase (_snake_case = 10**9 ) -> str: '''simple docstring''' __UpperCamelCase = 1 __UpperCamelCase = 2 __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __UpperCamelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(f"""{solution() = }""")
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ): '''simple docstring''' _a = {} if train_file is not None: _a = [train_file] if eval_file is not None: _a = [eval_file] if test_file is not None: _a = [test_file] _a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase ) _a = list(ds[list(files.keys() )[0]].features.keys() ) _a = features_name.pop(UpperCamelCase ) _a = list(set(ds[list(files.keys() )[0]][label_name] ) ) _a = {label: i for i, label in enumerate(UpperCamelCase )} _a = tokenizer.model_input_names _a = {} if len(UpperCamelCase ) == 1: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , ) elif len(UpperCamelCase ) == 2: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid _snake_case : str = logging.getLogger(__name__) @dataclass class A : lowercase_ = field(metadata={'help': 'Which column contains the label'} ) lowercase_ = field(default=_a 
,metadata={'help': 'The path of the training file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} ) lowercase_ = field( default=128 ,metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } ,) lowercase_ = field( default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A : lowercase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowercase_ = field( default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,) def snake_case_ (): '''simple docstring''' _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _a , _a , _a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _a , _a , _a , _a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict: _a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _a = TFTrainer( model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _a = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _a = trainer.evaluate() _a = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(UpperCamelCase ) return results if __name__ == "__main__": main()
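# An illustrative sketch of the CSV layout the training script above consumes
# (column names, file names, and the CLI invocation are assumptions for
# illustration): every column except the one at --label_column_id is treated as
# text input.
import csv

rows = [('this movie was great', 'pos'), ('terrible pacing', 'neg')]
with open('train.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['sentence', 'label'])  # the label column has index 1
    writer.writerows(rows)

# hypothetical invocation:
#   python run_tf_text_classification.py --train_file train.csv --label_column_id 1 \
#     --model_name_or_path bert-base-cased --output_dir /tmp/out --do_train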
def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers (numbers of the form 2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('n_element should be a positive number')
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
    print('-----------------------------------------------------')
    print(f"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
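# A quick check on the fixed function above (an added sketch): the first ten
# Hamming numbers.
assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]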
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( _a ,unittest.TestCase ): lowercase_ = LEDTokenizer lowercase_ = LEDTokenizerFast lowercase_ = True def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" super().setUp() _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def __lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , lowerCAmelCase_ ) self.assertIn('''attention_mask''' , lowerCAmelCase_ ) self.assertNotIn('''labels''' , lowerCAmelCase_ ) 
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _a = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = ['''A long paragraph for summarization.'''] _a = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' ) _a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' ) _a = inputs['''input_ids'''] _a = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = ['''Summary of the text.''', '''Another summary.'''] _a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']] _a = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = '''A, <mask> AllenNLP sentence.''' _a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
22
0
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]  # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
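One design note on the open list above: cell.sort() plus reverse() plus pop() re-sorts the whole frontier on every iteration just to take the lowest-f entry. A heap gives the same behavior more cheaply; a tiny self-contained sketch with dummy (f, g, x, y) tuples:

import heapq

open_list = []
heapq.heappush(open_list, (3, 0, 0, 0))  # (f, g, x, y) frontier entries
heapq.heappush(open_list, (1, 0, 1, 0))
f, g, x, y = heapq.heappop(open_list)  # always pops the lowest-f entry
print(f, g, x, y)  # 1 0 1 0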
319
'''simple docstring''' import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''' , [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 , dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1337 , num_examples=42 )} ), SplitDict({'''train''': SplitInfo()} ), ] , ) def snake_case_ (UpperCamelCase : SplitDict ): '''simple docstring''' _a = split_dict._to_yaml_list() assert len(UpperCamelCase ) == len(UpperCamelCase ) _a = SplitDict._from_yaml_list(UpperCamelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump _a = None # the split name of split_dict takes over the name of the split info object _a = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''' , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase ), SplitInfo(dataset_name='''my_dataset''' )] ) def snake_case_ (UpperCamelCase : List[str] ): '''simple docstring''' _a = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
22
0
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _lowerCAmelCase ( lowerCAmelCase_ :Optional[Any] , lowerCAmelCase_ :int , lowerCAmelCase_ :Union[str, Any]=None )->Dict: '''simple docstring''' assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ = nn.Parameter(lowerCAmelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ = nn.Parameter(lowerCAmelCase_ ) def _lowerCAmelCase ( lowerCAmelCase_ :Dict , lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Optional[int] )->Optional[int]: '''simple docstring''' snake_case_ = np.asarray(weights[0] ) snake_case_ = np.asarray(weights[1] ) snake_case_ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCAmelCase_ ).view(-1 , lowerCAmelCase_ ).contiguous().transpose(0 , 1 ) , ) def _lowerCAmelCase ( lowerCAmelCase_ :List[Any] , lowerCAmelCase_ :Union[str, Any] , lowerCAmelCase_ :int )->Optional[Any]: '''simple docstring''' snake_case_ = np.asarray(weights[0] ) snake_case_ = np.asarray(weights[1] ) snake_case_ = np.asarray(weights[2] ) snake_case_ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCAmelCase_ ).view(-1 , lowerCAmelCase_ ).contiguous().transpose(0 , 1 ) , ) def _lowerCAmelCase ( lowerCAmelCase_ :int , lowerCAmelCase_ :Any , lowerCAmelCase_ :int )->int: '''simple docstring''' snake_case_ = weights[0][0][0] snake_case_ = np.asarray(layer_norm_a[0] ) snake_case_ = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , ) # lsh weights + output snake_case_ = weights[0][1] if len(lowerCAmelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCAmelCase_ , torch_block.attention , lowerCAmelCase_ ) else: set_layer_weights_in_torch_local(lowerCAmelCase_ , torch_block.attention , lowerCAmelCase_ ) # intermediate weighs snake_case_ = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCAmelCase_ ) == 4: snake_case_ = intermediate_weights[2] # layernorm 2 snake_case_ = np.asarray(intermediate_weights[0][0] ) snake_case_ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , ) # intermediate dense snake_case_ = np.asarray(intermediate_weights[1][0] ) snake_case_ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , ) # intermediate out snake_case_ = np.asarray(intermediate_weights[4][0] 
) snake_case_ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , ) def _lowerCAmelCase ( lowerCAmelCase_ :Dict , lowerCAmelCase_ :str , lowerCAmelCase_ :List[Any] )->Union[str, Any]: '''simple docstring''' snake_case_ = torch_model.reformer # word embeds snake_case_ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCAmelCase_ ) , ) if isinstance(weights[3] , lowerCAmelCase_ ): snake_case_ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ = nn.Parameter(torch.tensor(lowerCAmelCase_ ) ) snake_case_ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCAmelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # output layer norm snake_case_ = np.asarray(weights[7][0] ) snake_case_ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCAmelCase_ ) , torch.tensor(lowerCAmelCase_ ) , ) # output embeddings snake_case_ = np.asarray(weights[9][0] ) snake_case_ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase_ ) , ) def _lowerCAmelCase ( lowerCAmelCase_ :str , lowerCAmelCase_ :Optional[Any] , lowerCAmelCase_ :Tuple )->Optional[int]: '''simple docstring''' snake_case_ = ReformerConfig.from_json_file(lowerCAmelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ = ReformerModelWithLMHead(lowerCAmelCase_ ) with open(lowerCAmelCase_ , "rb" ) as f: snake_case_ = pickle.load(lowerCAmelCase_ )["weights"] set_model_weights_in_torch(lowerCAmelCase_ , lowerCAmelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCAmelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE :List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) SCREAMING_SNAKE_CASE :Tuple = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
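The conversion above repeats one small pattern: shape-check a NumPy array from the pickled trax weights, then overwrite the matching torch parameter in place. A self-contained sketch of that pattern; the nn.Linear layer and the (3, 4) kernel here are illustrative, not part of the script:

import numpy as np
import torch
from torch import nn


def set_param(torch_layer, weight, bias=None):
    # shape-check, then overwrite the layer's parameters in place
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


layer = nn.Linear(4, 3)  # torch stores Linear weights as (out_features, in_features)
kernel = np.ones((3, 4), dtype=np.float32)  # trax dense kernels are (in, out), hence the transposes above
set_param(layer, torch.tensor(kernel), torch.zeros(3))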
283
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black _snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" _a = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _a = self.diffusers_dir shutil.copy( os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]: """simple docstring""" _a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: _a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result _a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) _a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ ) _a = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f: f.write(lowerCAmelCase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ ) with open(lowerCAmelCase_ , '''r''' ) as f: self.assertTrue(f.read() , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , ) # Copy consistency with a 
really long name _a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
22
0
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    # Classical fourth-order Runge-Kutta for y' = f(x, y) with y(x0) = y0,
    # integrated with fixed step h until x_end.
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
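A usage sketch for the integrator above, solving y' = y from y(0) = 1 up to x = 1; with h = 0.01 the endpoint should match e to well under 1e-4:

import numpy as np

y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # ~2.71828
assert abs(y[-1] - np.exp(1)) < 1e-4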
403
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _snake_case : Tuple = logging.get_logger(__name__) _snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case : List[Any] = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } _snake_case : Union[str, Any] = { 'squeezebert/squeezebert-uncased': 512, 'squeezebert/squeezebert-mnli': 512, 'squeezebert/squeezebert-mnli-headless': 512, } _snake_case : Tuple = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class A ( _a ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = SqueezeBertTokenizer def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int: """simple docstring""" super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , ) _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars ): _a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) ) _a = do_lower_case _a = strip_accents _a = tokenize_chinese_chars _a = normalizer_class(**lowerCAmelCase_ ) _a = do_lower_case def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]: """simple docstring""" _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = 
[self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" _a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
22
0
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase: """simple docstring""" @staticmethod def _a ( *_lowerCamelCase , **_lowerCamelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" @require_torch def _a ( self ): UpperCamelCase_: Dict = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , ) UpperCamelCase_: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCamelCase_: int = image_classifier(lowerCAmelCase_ , candidate_labels=['a', 'b', 'c'] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(lowerCAmelCase_ ) , [ [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}], [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}], ] , ) UpperCamelCase_: Dict = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , [ [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], ] , ) @require_tf def _a ( self ): UpperCamelCase_: int = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' ) UpperCamelCase_: Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCamelCase_: Tuple = image_classifier(lowerCAmelCase_ , candidate_labels=['a', 'b', 'c'] ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , ) UpperCamelCase_: Union[str, Any] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , [ [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': 
ANY(lowerCAmelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, {'score': 0.3_3_3, 'label': ANY(lowerCAmelCase_ )}, ], ] , ) @slow @require_torch def _a ( self ): UpperCamelCase_: List[str] = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , ) # This is an image of 2 cats with remotes and no planes UpperCamelCase_: int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCamelCase_: Tuple = image_classifier(lowerCAmelCase_ , candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ] , ) UpperCamelCase_: List[Any] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , [ [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ], ] * 5 , ) @slow @require_tf def _a ( self ): UpperCamelCase_: int = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' ) # This is an image of 2 cats with remotes and no planes UpperCamelCase_: Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCamelCase_: str = image_classifier(lowerCAmelCase_ , candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ] , ) UpperCamelCase_: Union[str, Any] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 ) self.assertEqual( nested_simplify(lowerCAmelCase_ ) , [ [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ], ] * 5 , )
57
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _snake_case : Dict = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = do_rescale _a = do_normalize _a = do_center_crop _a = crop_size _a = size _a = resample _a = rescale_factor _a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "shortest_edge" in size: _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _a = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = do_rescale if do_rescale is not None else self.do_rescale _a = do_normalize if do_normalize is not None else self.do_normalize _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ ) if not is_batched(lowerCAmelCase_ ): _a = [images] if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
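The class above follows transformers' BaseImageProcessor pipeline (resize, center-crop, rescale, normalize). A hedged usage sketch; the class name A is taken verbatim from the sample, and it assumes the obfuscated method names are restored to the preprocess hook that BaseImageProcessor.__call__ dispatches to:

from PIL import Image

processor = A()  # defaults: resize to 224x224, center-crop 224x224, rescale by 1/255, ImageNet mean/std
batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)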
22
0
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values __A = argparse.ArgumentParser() parser.add_argument("""--user""", type=str, default="""ubuntu""") parser.add_argument("""--host""", type=str, default="""localhost""") parser.add_argument("""--key_path""", type=str, default=None) parser.add_argument("""--instance""", type=str, default="""V100:1""") parser.add_argument("""--provider""", type=str, default="""cheapest""") parser.add_argument("""--use_spot""", type=bool, default=False) parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""") __A = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("""Cannot specify both BYO and on-demand cluster args""") __A = rh.cluster( name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path} ) else: __A = rh.cluster( name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) __A = args.example.rsplit("""/""", 1)[0] # Set up remote environment cluster.install_packages(["""pip:./"""]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
93
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case : str = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[str] = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Optional[int] = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Tuple = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[Any] = ['LayoutLMv3FeatureExtractor'] _snake_case : Tuple = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys _snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
22
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCamelCase : Optional[int] =logging.get_logger(__name__) _UpperCamelCase : List[str] ={ 'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json', 'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class _SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'mobilenet_v1' def __init__( self , _snake_case=3 , _snake_case=2_24 , _snake_case=1.0 , _snake_case=8 , _snake_case="relu6" , _snake_case=True , _snake_case=0.9_9_9 , _snake_case=0.0_2 , _snake_case=0.0_0_1 , **_snake_case , ): """simple docstring""" super().__init__(**lowerCAmelCase_ ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = depth_multiplier __lowerCamelCase = min_depth __lowerCamelCase = hidden_act __lowerCamelCase = tf_padding __lowerCamelCase = classifier_dropout_prob __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps class _SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" SCREAMING_SNAKE_CASE_ = version.parse('1.11' ) @property def _lowerCamelCase ( self ): """simple docstring""" return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def _lowerCamelCase ( self ): """simple docstring""" if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def _lowerCamelCase ( self ): """simple docstring""" return 1E-4
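The fields and defaults above match transformers' MobileNetV1Config; assuming that public name, a minimal usage sketch:

from transformers import MobileNetV1Config

config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
print(config.hidden_act, config.tf_padding)  # relu6 True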
316
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A ( _a ): lowercase_ = (DDPMParallelScheduler,) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]: """simple docstring""" _a = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowerCAmelCase_ ) return config def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , ) def __lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5 def __lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = self.dummy_sample_deter + 0.1 _a = self.dummy_sample_deter - 0.1 _a = samplea.shape[0] _a = torch.stack([samplea, samplea, samplea] , dim=0 ) _a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ ) _a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _a = 
self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config(prediction_type='''v_prediction''' ) _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def __lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) _a = scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase_ ): if i == len(lowerCAmelCase_ ) - 1: _a = -1 else: _a = timesteps[i + 1] _a = scheduler.previous_timestep(lowerCAmelCase_ ) _a = prev_t.item() self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] _a = len(lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
22
0
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(
    sequence: list[Any], current_subsequence: list[Any], index: int
) -> None:
    # DFS over a binary state-space tree: at each index, either skip or take the element.
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(['A', 'B', 'C'])
    generate_all_subsequences(seq)
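For a concrete sense of the traversal order above, a two-element input prints the skip branch before the take branch:

generate_all_subsequences(["A", "B"])
# []
# ['B']
# ['A']
# ['A', 'B']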
106
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # The California housing dataset is used to demonstrate the regressor.
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
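A small sanity-check sketch for the xgboost() helper above on synthetic data; the point is the (n, 1) prediction shape:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.0, -2.0, 0.5])
preds = xgboost(X[:80], y[:80], X[80:])
print(preds.shape)  # (20, 1)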
22
0
import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification _UpperCamelCase = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co _UpperCamelCase = 'main' # Default branch name _UpperCamelCase = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2' # One particular commit (not the top of `main`) _UpperCamelCase = 'aaaaaaa' # This commit does not exist, so we should 404. _UpperCamelCase = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684' # Sha-1 of config.json on the top of `main`, for checking purposes _UpperCamelCase = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3' @contextlib.contextmanager def lowerCAmelCase__( ) -> Tuple: print("Welcome!" ) yield print("Bye!" ) @contextlib.contextmanager def lowerCAmelCase__( ) -> Dict: print("Bonjour!" ) yield print("Au revoir!" ) class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' assert transformers.__spec__ is not None assert importlib.util.find_spec("transformers" ) is not None class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def UpperCAmelCase ( self , UpperCAmelCase ) -> int: '''simple docstring''' with ContextManagers([] ): print("Transformers are awesome!" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def UpperCAmelCase ( self , UpperCAmelCase ) -> Any: '''simple docstring''' with ContextManagers([context_en()] ): print("Transformers are awesome!" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" ) @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO ) def UpperCAmelCase ( self , UpperCAmelCase ) -> str: '''simple docstring''' with ContextManagers([context_fr(), context_en()] ): print("Transformers are awesome!" 
) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" ) @require_torch def UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' self.assertEqual(find_labels(lowerCAmelCase_ ) , ["labels"] ) self.assertEqual(find_labels(lowerCAmelCase_ ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(lowerCAmelCase_ ) , ["start_positions", "end_positions"] ) class _lowerCamelCase ( _a ): """simple docstring""" pass self.assertEqual(find_labels(lowerCAmelCase_ ) , ["labels"] ) @require_tf def UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.assertEqual(find_labels(lowerCAmelCase_ ) , ["labels"] ) self.assertEqual(find_labels(lowerCAmelCase_ ) , ["labels", "next_sentence_label"] ) self.assertEqual(find_labels(lowerCAmelCase_ ) , ["start_positions", "end_positions"] ) class _lowerCamelCase ( _a ): """simple docstring""" pass self.assertEqual(find_labels(lowerCAmelCase_ ) , ["labels"] ) @require_flax def UpperCAmelCase ( self ) -> Dict: '''simple docstring''' self.assertEqual(find_labels(lowerCAmelCase_ ) , [] ) self.assertEqual(find_labels(lowerCAmelCase_ ) , [] ) self.assertEqual(find_labels(lowerCAmelCase_ ) , [] ) class _lowerCamelCase ( _a ): """simple docstring""" pass self.assertEqual(find_labels(lowerCAmelCase_ ) , [] )
243
import qiskit


def half_adder(bit0: int, bit1: int):
    """Build and simulate a quantum half adder for the two input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
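A quick truth-table sketch for the half adder above (illustrative usage only; left commented out because each call runs a 1000-shot simulation):

# for b0 in (0, 1):
#     for b1 in (0, 1):
#         # counts keys read as "<AND bit><XOR bit>"; e.g. half_adder(1, 1) -> {'10': 1000}
#         print(b0, b1, half_adder(b0, b1))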
22
0
from math import pi, sqrt


def gamma(num: float) -> float:
    """Gamma function for integers and half-integers, via the recursion
    gamma(n) = (n - 1) * gamma(n - 1), with gamma(1) = 1 and gamma(0.5) = sqrt(pi)."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:  # entering 0 exits the loop instead of raising on gamma(0)
            print(f"gamma({num}) = {gamma(num)}")
            print("\nEnter 0 to exit...")
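A small consistency check (assumes the gamma function and imports above): for positive integers the recursion reduces to the factorial, and half-integers unwind down to sqrt(pi).

from math import factorial, isclose

assert gamma(5) == factorial(4)  # gamma(n) == (n - 1)!, here 24.0
assert isclose(gamma(3.5), 2.5 * 1.5 * 0.5 * sqrt(pi))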
378
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 hex digits in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of 16 32-bit words each."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a 32-character hex byte string."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
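An illustrative cross-check of the implementation above against Python's built-in MD5 (assumes `md5_me` from this file):

import hashlib

for message in [b"", b"The quick brown fox jumps over the lazy dog"]:
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")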
22
0
"""simple docstring""" import math def lowercase (_snake_case = 100 ) -> int: '''simple docstring''' __UpperCamelCase = sum(i * i for i in range(1 ,n + 1 ) ) __UpperCamelCase = int(math.pow(sum(range(1 ,n + 1 ) ) ,2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f"""{solution() = }""")
505
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
22
0
from ..utils import DummyObject, requires_backends


# NOTE: the concrete class names were lost in this copy of the file; the thirteen names
# below are a best-effort reconstruction following the layout of diffusers'
# `utils/dummy_flax_objects.py` (each dummy raises if the Flax backend is unavailable).
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
146
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
22
0
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
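A hypothetical usage sketch (the config class and kwargs here are illustrative; `self` would be the running test case that hosts the tester):

# from transformers import BertConfig
#
# tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
# tester.run_common_tests()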
319
from types import MethodType
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits_tensor = ((x & mask) != 0).float()
    bits_tensor = rearrange(bits_tensor, "b c d h w -> b (c d) h w")
    bits_tensor = bits_tensor * 2 - 1
    return bits_tensor


def bits_to_decimal(x, bits=BITS):
    """Expects bit tensor ranging from -1 to 1, outputs image tensor ranging from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clips "predicted x_0" to [-bit_scale, bit_scale] instead of [-1, 1]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clips "predicted x_0" to [-bit_scale, bit_scale] instead of [-1, 1]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale

        # Swap in the bit-aware `step`. It is bound to the scheduler (and given access to
        # `bit_scale`) so that `self` inside the replacement resolves to the scheduler.
        scheduler.bit_scale = bit_scale
        scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step, scheduler
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
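A hypothetical usage sketch for the pipeline above; the checkpoint path and scheduler settings are illustrative, not a real repository:

# from diffusers import DDIMScheduler, UNet2DConditionModel
#
# unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # hypothetical checkpoint
# scheduler = DDIMScheduler(num_train_timesteps=1000, clip_sample=True)
# pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
# image = pipe(height=256, width=256, num_inference_steps=50).images[0]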
22
0
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    """Wraps a Whisper feature extractor and a Whisper tokenizer into a single processor."""

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility, forward everything to the current processor
        # when used inside a target-context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
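A hypothetical usage sketch (the waveform variable is illustrative; the processor routes audio to the feature extractor and text to the tokenizer):

# from transformers import WhisperProcessor
#
# processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")  # waveform: 1-D float array
# labels = processor(text="a reference transcription", return_tensors="pt").input_ids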
283
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Any = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class A ( _a ): lowercase_ = 'roformer' def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) _a = vocab_size _a = hidden_size if embedding_size is None else embedding_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = initializer_range _a = layer_norm_eps _a = rotary_value _a = use_cache class A ( _a ): @property def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} _a = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
22
0
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1_103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1],
        )
403
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS (every edge weight is either 0 or 1)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
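A small illustrative check (assumes the AdjacencyList class above): the free edge contributes nothing to the distance, so the path 0 -> 1 -> 2 costs 1.

graph = AdjacencyList(3)
graph.add_edge(0, 1, 0)  # free edge
graph.add_edge(1, 2, 1)  # edge of weight 1
assert graph.get_shortest_path(0, 2) == 1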
22
0
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_model, key, value, full_name, weight_type):
    # Walk down the attribute path to the target parameter
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key

                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
57
"""Gamma function for positive integers and half-integers, via the recurrence Γ(n) = (n - 1) · Γ(n - 1)."""
from math import pi, sqrt


def gamma(num: float) -> float:
    """Return Γ(num) for a positive integer or half-integer ``num``."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)  # Γ(1/2) = √π, the base case for half-integers
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if not num:
            break  # entering 0 exits before gamma(0) would raise
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
22
0
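A minimal, self-contained sketch of the `.*` wildcard matching that `should_ignore` in the conversion script above relies on; the checkpoint key names below are made up for illustration:

def matches(name: str, key: str) -> bool:
    # Trailing ".*" matches any key under that prefix.
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    # An embedded ".*." matches a prefix and a suffix with anything between.
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    # Otherwise a plain substring match.
    return key in name


assert matches("encoder.layers.3.norm_k.weight", "encoder.layers.*.norm_k.weight")
assert matches("speech_decoder_prenet.layers.0.bias", "speech_decoder_prenet.*")
assert not matches("decoder.layers.0.fc1.weight", "encoder.proj")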
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) __A = { 'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'], 'processing_speech_to_text': ['Speech2TextProcessor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['Speech2TextTokenizer'] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['Speech2TextFeatureExtractor'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSpeech2TextForConditionalGeneration', 'TFSpeech2TextModel', 'TFSpeech2TextPreTrainedModel', ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'Speech2TextForConditionalGeneration', 'Speech2TextModel', 'Speech2TextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
93
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
22
0
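A minimal sketch of the optional-dependency guard pattern used by the lazy `__init__` in the row above; the backend check and exported name are stand-ins, not real transformers symbols:

class OptionalDependencyNotAvailable(Exception):
    """Stand-in for transformers.utils.OptionalDependencyNotAvailable."""


def is_backend_available() -> bool:
    """Stand-in for checks like is_torch_available()."""
    return False


_import_structure = {}
try:
    if not is_backend_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # backend missing: the heavy symbols simply are not registered
else:
    _import_structure["modeling_backend"] = ["SomeBackendModel"]

print(_import_structure)  # {} here, because the backend is "unavailable"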
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def lowerCamelCase_ ( A_ ): __lowerCamelCase = SwinvaConfig() __lowerCamelCase = swinva_name.split('''_''' ) __lowerCamelCase = name_split[1] if "to" in name_split[3]: __lowerCamelCase = int(name_split[3][-3:] ) else: __lowerCamelCase = int(name_split[3] ) if "to" in name_split[2]: __lowerCamelCase = int(name_split[2][-2:] ) else: __lowerCamelCase = int(name_split[2][6:] ) if model_size == "tiny": __lowerCamelCase = 96 __lowerCamelCase = (2, 2, 6, 2) __lowerCamelCase = (3, 6, 12, 24) elif model_size == "small": __lowerCamelCase = 96 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (3, 6, 12, 24) elif model_size == "base": __lowerCamelCase = 1_28 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (4, 8, 16, 32) else: __lowerCamelCase = 1_92 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (6, 12, 24, 48) if "to" in swinva_name: __lowerCamelCase = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): __lowerCamelCase = 2_18_41 __lowerCamelCase = '''huggingface/label-files''' __lowerCamelCase = '''imagenet-22k-id2label.json''' __lowerCamelCase = json.load(open(hf_hub_download(A_ , A_ , repo_type='''dataset''' ) , '''r''' ) ) __lowerCamelCase = {int(A_ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} else: __lowerCamelCase = 10_00 __lowerCamelCase = '''huggingface/label-files''' __lowerCamelCase = '''imagenet-1k-id2label.json''' __lowerCamelCase = json.load(open(hf_hub_download(A_ , A_ , repo_type='''dataset''' ) , '''r''' ) ) __lowerCamelCase = {int(A_ ): v for k, v in idalabel.items()} __lowerCamelCase = idalabel __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = img_size __lowerCamelCase = num_classes __lowerCamelCase = embed_dim __lowerCamelCase = depths __lowerCamelCase = num_heads __lowerCamelCase = window_size return config def lowerCamelCase_ ( A_ ): if "patch_embed.proj" in name: __lowerCamelCase = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: __lowerCamelCase = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: __lowerCamelCase = '''encoder.''' + name if "attn.proj" in name: __lowerCamelCase = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __lowerCamelCase = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __lowerCamelCase = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __lowerCamelCase = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __lowerCamelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __lowerCamelCase = name.replace('''mlp.fc2''' , '''output.dense''' ) if "q_bias" in name: __lowerCamelCase = name.replace('''q_bias''' , '''query.bias''' ) if "k_bias" in name: __lowerCamelCase = name.replace('''k_bias''' , '''key.bias''' ) if "v_bias" in name: __lowerCamelCase = name.replace('''v_bias''' , '''value.bias''' ) if "cpb_mlp" in name: __lowerCamelCase = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' ) if name == "norm.weight": __lowerCamelCase = '''layernorm.weight''' if name == "norm.bias": __lowerCamelCase = '''layernorm.bias''' if "head" in 
name: __lowerCamelCase = name.replace('''head''' , '''classifier''' ) else: __lowerCamelCase = '''swinv2.''' + name return name def lowerCamelCase_ ( A_ , A_ ): for key in orig_state_dict.copy().keys(): __lowerCamelCase = orig_state_dict.pop(A_ ) if "mask" in key: continue elif "qkv" in key: __lowerCamelCase = key.split('''.''' ) __lowerCamelCase = int(key_split[1] ) __lowerCamelCase = int(key_split[3] ) __lowerCamelCase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __lowerCamelCase = val[:dim, :] __lowerCamelCase = val[dim : dim * 2, :] __lowerCamelCase = val[-dim:, :] else: __lowerCamelCase = val[:dim] __lowerCamelCase = val[ dim : dim * 2 ] __lowerCamelCase = val[-dim:] else: __lowerCamelCase = val return orig_state_dict def lowerCamelCase_ ( A_ , A_ ): __lowerCamelCase = timm.create_model(A_ , pretrained=A_ ) timm_model.eval() __lowerCamelCase = get_swinva_config(A_ ) __lowerCamelCase = SwinvaForImageClassification(A_ ) model.eval() __lowerCamelCase = convert_state_dict(timm_model.state_dict() , A_ ) model.load_state_dict(A_ ) __lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCamelCase = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) ) __lowerCamelCase = Image.open(requests.get(A_ , stream=A_ ).raw ) __lowerCamelCase = image_processor(images=A_ , return_tensors='''pt''' ) __lowerCamelCase = timm_model(inputs['''pixel_values'''] ) __lowerCamelCase = model(**A_ ).logits assert torch.allclose(A_ , A_ , atol=1e-3 ) print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) model.push_to_hub( repo_path_or_name=Path(A_ , A_ ) , organization='''nandwalritik''' , commit_message='''Add model''' , ) if __name__ == "__main__": _UpperCamelCase : List[str] =argparse.ArgumentParser() # Required parameters parser.add_argument( "--swinv2_name", default="swinv2_tiny_patch4_window8_256", type=str, help="Name of the Swinv2 timm model you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) _UpperCamelCase : int =parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
316
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' _snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' _snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def snake_case_ (UpperCamelCase : Tuple ): '''simple docstring''' def remove_articles(UpperCamelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase ) def white_space_fix(UpperCamelCase : Union[str, Any] ): return " ".join(text.split() ) def remove_punc(UpperCamelCase : str ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCamelCase : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) ) def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) ) def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' _a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )] return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100 def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(UpperCamelCase ) _a = Counter(UpperCamelCase ) _a = Counter() for sgram, scount in 
sgramcounter.items(): _a = scount * numref _a = Counter(UpperCamelCase ) _a = Counter() for cgram, ccount in cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = keeptmpscorea / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(UpperCamelCase ) > 0: _a = deltmpscorea / len(UpperCamelCase ) # ADDITION _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = set(UpperCamelCase ) & set(UpperCamelCase ) _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' _a = len(UpperCamelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ): '''simple docstring''' if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase ) else: 
_a = sentence if not return_str: _a = normalized_sent.split() return normalized_sent def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ): sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] ) _a = sari_score / len(UpperCamelCase ) return 100 * sari_score def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ): '''simple docstring''' _a = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(UpperCamelCase )] _a = sacrebleu.corpus_bleu( UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict: """simple docstring""" _a = {} result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) return result
22
0
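The Swin V2 conversion in the row above splits a fused QKV projection by slicing rows; a small sketch of that slicing, with illustrative shapes:

import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)  # fused (3*dim, dim) projection

# Same slices as convert_state_dict: first dim rows are Q, next dim are K, last dim are V.
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]

assert q.shape == k.shape == v.shape == (dim, dim)
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)  # lossless split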
class Things:
    """A named item with a value and a weight."""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """Build a list of Things from parallel name/value/weight lists."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """Greedily pick items, best-first by key_func, until max_cost is reached."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    """Placeholder for doctest-based tests of greedy()."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
106
"""Helpers for converting image tensors and arrays to PIL images."""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        images = [Image.fromarray(image) for image in images]
    return images
22
0
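Hypothetical usage of the greedy knapsack helpers in the row above; the menu values are made up, and value_weight is one possible ranking function:

# Three items with (value, weight); budget max_cost=50 by weight.
menu = build_menu(["burger", "salad", "soda"], [80.0, 50.0, 30.0], [40.0, 10.0, 10.0])
chosen, total_value = greedy(menu, max_cost=50.0, key_func=Things.value_weight)
# Greedy by value/weight picks salad (5.0) and soda (3.0) before the burger (2.0).
print([thing.get_name() for thing in chosen], total_value)  # ['salad', 'soda'] 80.0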
from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging _UpperCamelCase = logging.get_logger(__name__) def lowerCAmelCase__( lowercase : Union[str, Any] , lowercase : Optional[Any] ) -> Tuple: try: with open(lowercase , "rb" ) as flax_state_f: __snake_case : Any = from_bytes(lowercase , flax_state_f.read() ) except UnpicklingError as e: try: with open(lowercase ) as f: if f.read().startswith("version" ): raise OSError( "You seem to have cloned a repository without having git-lfs installed. Please" " install git-lfs and run `git lfs install` followed by `git lfs pull` in the" " folder you cloned." ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ ) return load_flax_weights_in_pytorch_model(lowercase , lowercase ) def lowerCAmelCase__( lowercase : Any , lowercase : int ) -> List[Any]: try: import torch # noqa: F401 except ImportError: logger.error( "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise # check if we have bf16 weights __snake_case : Any = flatten_dict(jax.tree_util.tree_map(lambda lowercase : x.dtype == jnp.bfloataa , lowercase ) ).values() if any(lowercase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." ) __snake_case : List[str] = jax.tree_util.tree_map( lambda lowercase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowercase ) __snake_case : List[str] = "" __snake_case : int = flatten_dict(lowercase , sep="." ) __snake_case : str = pt_model.state_dict() # keep track of unexpected & missing keys __snake_case : List[Any] = [] __snake_case : int = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): __snake_case : Any = flax_key_tuple.split("." ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: __snake_case : List[Any] = flax_key_tuple_array[:-1] + ["weight"] __snake_case : Union[str, Any] = jnp.transpose(lowercase , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": __snake_case : int = flax_key_tuple_array[:-1] + ["weight"] __snake_case : List[Any] = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": __snake_case : Optional[int] = flax_key_tuple_array[:-1] + ["weight"] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(lowercase ): __snake_case : Dict = ( flax_key_tuple_string.replace("_0" , ".0" ) .replace("_1" , ".1" ) .replace("_2" , ".2" ) .replace("_3" , ".3" ) .replace("_4" , ".4" ) .replace("_5" , ".5" ) .replace("_6" , ".6" ) .replace("_7" , ".7" ) .replace("_8" , ".8" ) .replace("_9" , ".9" ) ) __snake_case : str = ".".join(lowercase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"""Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected """ f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) else: # add weight to pytorch dict __snake_case : Tuple = np.asarray(lowercase ) if not isinstance(lowercase , np.ndarray ) else flax_tensor __snake_case : List[Any] = torch.from_numpy(lowercase ) # remove from missing keys missing_keys.remove(lowercase ) else: # weight is not expected by PyTorch model unexpected_keys.append(lowercase ) pt_model.load_state_dict(lowercase ) # re-transform missing_keys to list __snake_case : Optional[Any] = list(lowercase ) if len(lowercase ) > 0: logger.warning( "Some weights of the Flax model were not used when initializing the PyTorch model" f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing""" f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture""" " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect""" " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" " FlaxBertForSequenceClassification model)." ) if len(lowercase ) > 0: logger.warning( f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly""" f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to""" " use it for predictions and inference." ) return pt_model
243
"""Post a message to a Slack incoming-webhook URL."""
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
22
0
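A sketch of the weight-layout conversion the Flax-to-PyTorch loader above performs: 4-D conv kernels move from HWIO to OIHW and dense kernels are transposed. Shapes here are illustrative:

import numpy as np

conv_kernel_hwio = np.zeros((3, 3, 8, 16))      # Flax conv kernel: (H, W, in, out)
conv_weight_oihw = np.transpose(conv_kernel_hwio, (3, 2, 0, 1))
assert conv_weight_oihw.shape == (16, 8, 3, 3)  # PyTorch conv weight: (out, in, H, W)

dense_kernel = np.zeros((128, 64))              # Flax dense kernel: (in, out)
assert dense_kernel.T.shape == (64, 128)        # PyTorch linear weight: (out, in)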
'''simple docstring''' import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter snake_case = True except ImportError: snake_case = False snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase_ ( lowerCamelCase_ ): """simple docstring""" return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class lowerCAmelCase ( _a ): @staticmethod def _A ( a__ : ArgumentParser ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" , type=lowerCAmelCase_ , help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" , type=lowerCAmelCase_ , help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self : Optional[Any] , a__ : bool , a__ : str , a__ : List[str]=None , *a__ : Tuple ): '''simple docstring''' lowerCAmelCase__ : str = testing lowerCAmelCase__ : Tuple = testing_file lowerCAmelCase__ : Union[str, Any] = path def _A ( self : Union[str, Any] ): '''simple docstring''' warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory lowerCAmelCase__ : List[str] = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(lowerCAmelCase_ ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." 
) lowerCAmelCase__ : Optional[int] = ( Path(lowerCAmelCase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) lowerCAmelCase__ : Optional[Any] = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(lowerCAmelCase_ ) ) else: with open(self._testing_file , "r" ) as configuration_file: lowerCAmelCase__ : List[str] = json.load(lowerCAmelCase_ ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowerCAmelCase_ , extra_context=lowerCAmelCase_ , ) lowerCAmelCase__ : Any = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" , "r" ) as configuration_file: lowerCAmelCase__ : Optional[Any] = json.load(lowerCAmelCase_ ) lowerCAmelCase__ : Union[str, Any] = configuration["lowercase_modelname"] lowerCAmelCase__ : Dict = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(F'''{directory}/configuration.json''' ) lowerCAmelCase__ : str = "PyTorch" in generate_tensorflow_pytorch_and_flax lowerCAmelCase__ : Optional[Any] = "TensorFlow" in generate_tensorflow_pytorch_and_flax lowerCAmelCase__ : int = "Flax" in generate_tensorflow_pytorch_and_flax lowerCAmelCase__ : List[str] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowerCAmelCase_ ) # Tests require submodules as they have parent imports with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ): pass shutil.move( F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , ) shutil.move( F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(a__ : int ): with open(lowerCAmelCase_ , "r" ) as f: lowerCAmelCase__ : Optional[Any] = f.readlines() with open(lowerCAmelCase_ , "w" ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(lowerCAmelCase_ ) if output_pytorch: if not self._testing: remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(a__ : str , a__ : str , a__ : List[str] ): # Create temp file lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = mkstemp() lowerCAmelCase__ : Dict = False with fdopen(lowerCAmelCase_ , "w" ) as new_file: with open(lowerCAmelCase_ ) as old_file: for line in old_file: new_file.write(lowerCAmelCase_ ) if line_to_copy_below in line: lowerCAmelCase__ : int = True for line_to_copy in lines_to_copy: new_file.write(lowerCAmelCase_ ) if not line_found: raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(lowerCAmelCase_ , lowerCAmelCase_ ) # Remove original file remove(lowerCAmelCase_ ) # Move new file move(lowerCAmelCase_ , lowerCAmelCase_ ) def skip_units(a__ : str ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(a__ : Optional[Any] ): with open(lowerCAmelCase_ ) as datafile: lowerCAmelCase__ : List[Any] = [] lowerCAmelCase__ : Optional[Any] = False lowerCAmelCase__ : List[str] = False for line in datafile: if "# To replace in: " in line and "##" 
not in line: lowerCAmelCase__ : Any = line.split("\"" )[1] lowerCAmelCase__ : Union[str, Any] = skip_units(lowerCAmelCase_ ) elif "# Below: " in line and "##" not in line: lowerCAmelCase__ : Any = line.split("\"" )[1] lowerCAmelCase__ : Dict = skip_units(lowerCAmelCase_ ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase__ : Tuple = [] elif "# Replace with" in line and "##" not in line: lowerCAmelCase__ : Optional[int] = [] elif "##" not in line: lines_to_copy.append(lowerCAmelCase_ ) remove(lowerCAmelCase_ ) replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(lowerCAmelCase_ )
378
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _snake_case : Tuple = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''shortest_edge''': 2_56} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any: """simple docstring""" _a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase_ ): _a = target_sizes.numpy() _a = [] for idx in range(len(lowerCAmelCase_ ) ): _a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ ) _a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: _a = logits.argmax(dim=1 ) _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
22
0
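A numpy sketch of the preprocessing order applied by the image processor above (center crop, then rescale, then normalize; the resize step is omitted here). All values are illustrative:

import numpy as np

image = np.random.randint(0, 256, (256, 256, 3)).astype(np.float32)

crop = 224
top = (image.shape[0] - crop) // 2
left = (image.shape[1] - crop) // 2
image = image[top : top + crop, left : left + crop]  # center crop to 224x224

image = image * (1 / 255)  # rescale to [0, 1]

mean = np.array([0.5, 0.5, 0.5])  # IMAGENET_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5])   # IMAGENET_STANDARD_STD
image = (image - mean) / std      # normalize per channel

assert image.shape == (crop, crop, 3)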
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def A ( self : Any )-> Tuple: __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = BlipImageProcessor() __UpperCamelCase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) __UpperCamelCase = BlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) def A ( self : List[Any] , **A_ : List[str] )-> str: return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer def A ( self : int , **A_ : Optional[int] )-> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor def A ( self : Optional[int] )-> int: shutil.rmtree(self.tmpdirname ) def A ( self : List[str] )-> Any: __UpperCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __UpperCamelCase = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def A ( self : List[Any] )-> int: __UpperCamelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __UpperCamelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) __UpperCamelCase = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 ) __UpperCamelCase = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowerCAmelCase_ ) def A ( self : Any )-> Any: __UpperCamelCase = self.get_image_processor() __UpperCamelCase = self.get_tokenizer() __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) __UpperCamelCase = self.prepare_image_inputs() __UpperCamelCase = image_processor(lowerCAmelCase_ , return_tensors="np" ) __UpperCamelCase = processor(images=lowerCAmelCase_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A ( self : str )-> Optional[int]: __UpperCamelCase = self.get_image_processor() __UpperCamelCase = self.get_tokenizer() __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) __UpperCamelCase = "lower newer" __UpperCamelCase = processor(text=lowerCAmelCase_ ) __UpperCamelCase = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A ( self : str )-> Optional[int]: __UpperCamelCase = self.get_image_processor() __UpperCamelCase = self.get_tokenizer() __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) __UpperCamelCase = "lower newer" __UpperCamelCase = self.prepare_image_inputs() __UpperCamelCase = 
processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase_ ): processor() def A ( self : List[str] )-> str: __UpperCamelCase = self.get_image_processor() __UpperCamelCase = self.get_tokenizer() __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) __UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __UpperCamelCase = processor.batch_decode(lowerCAmelCase_ ) __UpperCamelCase = tokenizer.batch_decode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def A ( self : Any )-> Optional[Any]: __UpperCamelCase = self.get_image_processor() __UpperCamelCase = self.get_tokenizer() __UpperCamelCase = BlipProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ ) __UpperCamelCase = "lower newer" __UpperCamelCase = self.prepare_image_inputs() __UpperCamelCase = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
505
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ): '''simple docstring''' _a = {} if train_file is not None: _a = [train_file] if eval_file is not None: _a = [eval_file] if test_file is not None: _a = [test_file] _a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase ) _a = list(ds[list(files.keys() )[0]].features.keys() ) _a = features_name.pop(UpperCamelCase ) _a = list(set(ds[list(files.keys() )[0]][label_name] ) ) _a = {label: i for i, label in enumerate(UpperCamelCase )} _a = tokenizer.model_input_names _a = {} if len(UpperCamelCase ) == 1: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , ) elif len(UpperCamelCase ) == 2: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid _snake_case : str = logging.getLogger(__name__) @dataclass class A : lowercase_ = field(metadata={'help': 'Which column contains the label'} ) lowercase_ = field(default=_a 
,metadata={'help': 'The path of the training file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} ) lowercase_ = field( default=128 ,metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } ,) lowercase_ = field( default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A : lowercase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowercase_ = field( default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,) def snake_case_ (): '''simple docstring''' _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _a , _a , _a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _a , _a , _a , _a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict: _a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _a = TFTrainer( model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _a = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _a = trainer.evaluate() _a = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(UpperCamelCase ) return results if __name__ == "__main__": main()
22
0
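The get_tfds helper in the script above wires Python generators into TFTrainer through tf.data.Dataset.from_generator, passing a (features_dict, label) structure for the element types and shapes. A minimal self-contained sketch of that pattern follows; the feature names and toy values are illustrative, not taken from the script:

import tensorflow as tf


def gen_examples():
    # Each example is a dict of variable-length int32 features plus an int32 label,
    # mirroring the (d, label) pairs yielded by gen_train/gen_val/gen_test above.
    yield {"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1]}, 0
    yield {"input_ids": [101, 2054, 3899, 102], "attention_mask": [1, 1, 1, 1]}, 1


dataset = tf.data.Dataset.from_generator(
    gen_examples,
    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int32),
    ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
)

for features, label in dataset.take(1):
    print(features["input_ids"].numpy(), label.numpy())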
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is still marked, it is prime
        if sieve[start]:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i]:
                    sieve[i] = False
        start += 1

    # Everything above sqrt(num) that is still marked is also prime
    for j in range(end + 1, num + 1):
        if sieve[j]:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
146
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( _a ,unittest.TestCase ): lowercase_ = LEDTokenizer lowercase_ = LEDTokenizerFast lowercase_ = True def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" super().setUp() _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def __lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , lowerCAmelCase_ ) self.assertIn('''attention_mask''' , lowerCAmelCase_ ) self.assertNotIn('''labels''' , lowerCAmelCase_ ) 
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _a = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = ['''A long paragraph for summarization.'''] _a = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' ) _a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' ) _a = inputs['''input_ids'''] _a = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = ['''Summary of the text.''', '''Another summary.'''] _a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']] _a = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = '''A, <mask> AllenNLP sentence.''' _a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
22
0
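As a quick sanity check, the corrected prime_sieve in the row above can be compared against naive trial division. This sketch assumes it runs in the same module as the sieve; the is_prime helper is purely illustrative:

from math import isqrt


def is_prime(n: int) -> bool:
    # Trial division up to sqrt(n): slow, but obviously correct.
    return n >= 2 and all(n % d for d in range(2, isqrt(n) + 1))


assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert prime_sieve(100) == [n for n in range(2, 101) if is_prime(n)]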
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=36 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=1000 , ): lowercase : int = parent lowercase : Union[str, Any] = batch_size lowercase : Tuple = num_channels lowercase : List[str] = image_size lowercase : str = patch_size lowercase : Optional[Any] = is_training lowercase : Optional[Any] = use_input_mask lowercase : Tuple = use_token_type_ids lowercase : Union[str, Any] = use_labels lowercase : str = vocab_size lowercase : int = hidden_size lowercase : Any = num_hidden_layers lowercase : Dict = num_attention_heads lowercase : List[str] = intermediate_size lowercase : Union[str, Any] = hidden_act lowercase : Optional[int] = hidden_dropout_prob lowercase : Dict = attention_probs_dropout_prob lowercase : Optional[Any] = max_position_embeddings lowercase : Optional[Any] = type_vocab_size lowercase : str = type_sequence_label_size lowercase : Tuple = initializer_range lowercase : List[Any] = coordinate_size lowercase : Optional[int] = shape_size lowercase : List[Any] = num_labels lowercase : int = num_choices lowercase : str = scope lowercase : Optional[int] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowercase : Optional[Any] = text_seq_length lowercase : Optional[int] = (image_size // patch_size) ** 2 + 1 lowercase : int = self.text_seq_length + self.image_seq_length def __lowerCamelCase ( self ): lowercase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowercase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) lowercase : int = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in 
range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase : List[str] = bbox[i, j, 3] lowercase : str = bbox[i, j, 1] lowercase : List[Any] = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: lowercase : Optional[Any] = bbox[i, j, 2] lowercase : Dict = bbox[i, j, 0] lowercase : Optional[int] = tmp_coordinate lowercase : Dict = tf.constant(lowerCAmelCase_ ) lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : List[str] = None if self.use_input_mask: lowercase : int = random_attention_mask([self.batch_size, self.text_seq_length] ) lowercase : Optional[int] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) lowercase : Any = None lowercase : Dict = None if self.use_labels: lowercase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) lowercase : List[str] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : int = TFLayoutLMvaModel(config=lowerCAmelCase_ ) # text + image lowercase : Tuple = model(lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ ) lowercase : Tuple = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , training=lowerCAmelCase_ , ) lowercase : str = model(lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only lowercase : Any = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowercase : List[str] = model({'''pixel_values''': pixel_values} , training=lowerCAmelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = self.num_labels lowercase : str = TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase_ ) lowercase : List[str] = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = self.num_labels lowercase : Dict = TFLayoutLMvaForTokenClassification(config=lowerCAmelCase_ ) lowercase : int = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = 2 lowercase : int = TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase_ ) lowercase : Any = model( lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , training=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Dict = config_and_inputs lowercase : Optional[int] = { '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): A : List[Any] = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) A : Optional[int] = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) A : Tuple = False A : Any = False A : int = False def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return True def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): lowercase : List[str] = copy.deepcopy(lowerCAmelCase_ ) if model_class in get_values(lowerCAmelCase_ ): lowercase : Tuple = { k: tf.tile(tf.expand_dims(lowerCAmelCase_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(lowerCAmelCase_ , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCAmelCase_ ): lowercase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase_ ): lowercase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) lowercase : List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase_ ): lowercase : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(lowerCAmelCase_ ): lowercase : Any = tf.zeros( 
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __lowerCamelCase ( self ): lowercase : Optional[int] = TFLayoutLMvaModelTester(self ) lowercase : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): lowercase , lowercase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : str = model_class(lowerCAmelCase_ ) if getattr(lowerCAmelCase_ , '''hf_compute_loss''' , lowerCAmelCase_ ): # The number of elements in the loss should be the same as the number of elements in the label lowercase : Tuple = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) lowercase : Optional[int] = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase_ )[0] ] lowercase : Optional[int] = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs lowercase : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) lowercase : str = prepared_for_class.pop('''input_ids''' ) lowercase : Any = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions lowercase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) lowercase : Optional[Any] = prepared_for_class.pop('''input_ids''' ) if "labels" in prepared_for_class: lowercase : int = prepared_for_class['''labels'''].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: lowercase : str = -100 lowercase : List[Any] = tf.convert_to_tensor(lowerCAmelCase_ ) lowercase : str = model(lowerCAmelCase_ , **lowerCAmelCase_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict lowercase : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) lowercase : Optional[int] = model(lowerCAmelCase_ )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple lowercase : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) # Get keys that were added with the _prepare_for_class function lowercase : List[str] = prepared_for_class.keys() - inputs_dict.keys() lowercase : Any = inspect.signature(model.call ).parameters lowercase : Optional[Any] = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple lowercase : List[Any] = {0: '''input_ids'''} for label_key in label_keys: lowercase : Union[str, Any] = signature_names.index(lowerCAmelCase_ ) lowercase : Dict = label_key lowercase : str = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple lowercase : Dict = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: lowercase : int = prepared_for_class[value] lowercase : str = tuple(lowerCAmelCase_ ) # Send to model 
lowercase : Any = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __lowerCamelCase ( self ): ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCamelCase ( self ): ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase : List[Any] = type self.model_tester.create_and_check_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCamelCase ( self ): ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCamelCase ( self ): ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCamelCase ( self ): ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @slow def __lowerCamelCase ( self ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Dict = TFLayoutLMvaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def __lowercase ( ) ->Optional[int]: """simple docstring""" lowercase : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __lowerCamelCase ( self ): return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ ) if is_vision_available() else None @slow def __lowerCamelCase ( self ): lowercase : Optional[Any] = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ) lowercase : List[str] = self.default_image_processor lowercase : Union[str, Any] = prepare_img() lowercase : Tuple = image_processor(images=lowerCAmelCase_ , return_tensors='''tf''' ).pixel_values lowercase : Any = tf.constant([[1, 2]] ) lowercase : Dict = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass lowercase : List[Any] = model(input_ids=lowerCAmelCase_ , bbox=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits lowercase : Dict = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase_ ) lowercase : 
Optional[int] = tf.constant( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
319
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # The deprecated dataset_name field should still be part of the plain-dict dump
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
22
0
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around a[left_index]; return the pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort the half-open slice a[left:right] in place using quicksort with a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
283
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black _snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" _a = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _a = self.diffusers_dir shutil.copy( os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]: """simple docstring""" _a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: _a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result _a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) _a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ ) _a = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f: f.write(lowerCAmelCase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ ) with open(lowerCAmelCase_ , '''r''' ) as f: self.assertTrue(f.read() , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , ) # Copy consistency with a 
really long name _a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , F'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , lowerCAmelCase_ , lowerCAmelCase_ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , lowerCAmelCase_ , overwrite_result=re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , )
22
0
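The corrected quick_sort_random in the row above sorts the half-open range [left, right) in place. A short usage sketch, assuming it runs in the same module as the sort:

data = [5, 3, 8, 1, 9, 2, 2]
quick_sort_random(data, 0, len(data))
assert data == [1, 2, 2, 3, 5, 8, 9]

# The random pivot keeps the quadratic worst case unlikely even on adversarial orderings:
already_reversed = list(range(100, 0, -1))
quick_sort_random(already_reversed, 0, len(already_reversed))
assert already_reversed == list(range(1, 101))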
import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Tuple = {'vocab_file': 'sentencepiece.model'} _lowerCamelCase : List[Any] = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _lowerCamelCase : str = { 'google/rembert': 2_56, } class lowercase ( _a ): lowercase__ : str = VOCAB_FILES_NAMES lowercase__ : str = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple=False , _UpperCamelCase : Tuple=True , _UpperCamelCase : Dict=True , _UpperCamelCase : Optional[Any]="[CLS]" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Dict="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[str]="[PAD]" , _UpperCamelCase : Optional[int]="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , **_UpperCamelCase : List[Any] , ) -> int: '''simple docstring''' super().__init__( do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) SCREAMING_SNAKE_CASE = do_lower_case SCREAMING_SNAKE_CASE = remove_space SCREAMING_SNAKE_CASE = keep_accents SCREAMING_SNAKE_CASE = vocab_file SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor() self.sp_model.Load(lowerCAmelCase_ ) @property def __snake_case( self : str ) -> Any: '''simple docstring''' return len(self.sp_model ) def __snake_case( self : Dict ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = d SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def __snake_case( self : Union[str, Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str]=False ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(lowerCAmelCase_ ) return pieces def __snake_case( self : int , _UpperCamelCase : Dict ) -> Union[str, Any]: '''simple docstring''' return self.sp_model.PieceToId(lowerCAmelCase_ ) def __snake_case( self : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> int: '''simple docstring''' return self.sp_model.IdToPiece(lowerCAmelCase_ ) def __snake_case( self : Tuple , _UpperCamelCase : Any ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.sp_model.decode_pieces(lowerCAmelCase_ ) return out_string def __snake_case( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __snake_case( self : List[Any] , _UpperCamelCase : 
List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1] return [1] + ([0] * len(lowerCAmelCase_ )) + [1] def __snake_case( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __snake_case( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase_ ): logger.error("Vocabulary path ({}) should be a directory".format(lowerCAmelCase_ ) ) return SCREAMING_SNAKE_CASE = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
403
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _snake_case : Tuple = logging.get_logger(__name__) _snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _snake_case : List[Any] = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } _snake_case : Union[str, Any] = { 'squeezebert/squeezebert-uncased': 512, 'squeezebert/squeezebert-mnli': 512, 'squeezebert/squeezebert-mnli-headless': 512, } _snake_case : Tuple = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class A ( _a ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_INIT_CONFIGURATION lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = SqueezeBertTokenizer def __init__( self : str , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Any="[CLS]" , lowerCAmelCase_ : List[str]="[MASK]" , lowerCAmelCase_ : int=True , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : Optional[int] , ) -> int: """simple docstring""" super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , ) _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase_ ) != tokenize_chinese_chars ): _a = getattr(lowerCAmelCase_ , normalizer_state.pop('''type''' ) ) _a = do_lower_case _a = strip_accents _a = tokenize_chinese_chars _a = normalizer_class(**lowerCAmelCase_ ) _a = do_lower_case def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any]=None ) -> List[str]: """simple docstring""" _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" _a = 
[self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" _a = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
22
0
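Both tokenizers in the row above build sequence pairs as [CLS] A [SEP] B [SEP] and derive token type ids from the lengths of the two segments. A pure-Python sketch of that logic with toy ids (101/102 stand in for the CLS/SEP ids; no tokenizer download needed, and the helper names are illustrative):

from __future__ import annotations


def build_inputs(ids_a: list[int], ids_b: list[int] | None = None, cls_id: int = 101, sep_id: int = 102) -> list[int]:
    # [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


def token_type_ids(ids_a: list[int], ids_b: list[int] | None = None, cls_id: int = 101, sep_id: int = 102) -> list[int]:
    # Segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP].
    first = len([cls_id] + ids_a + [sep_id]) * [0]
    if ids_b is None:
        return first
    return first + len(ids_b + [sep_id]) * [1]


assert build_inputs([7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]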
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
57
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _snake_case : Dict = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : int , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = do_rescale _a = do_normalize _a = do_center_crop _a = crop_size _a = size _a = resample _a = rescale_factor _a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _a = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "shortest_edge" in size: _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _a = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Dict , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> BatchFeature: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = do_rescale if do_rescale is not None else self.do_rescale _a = do_normalize if do_normalize is not None else self.do_normalize _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ ) if not is_batched(lowerCAmelCase_ ): _a = [images] if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
22
0
"""simple docstring""" import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger __A = get_logger(__name__) class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase = None ): '''simple docstring''' lowerCAmelCase__ :Any = ( os.path.join(lowerCAmelCase_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) lowerCAmelCase__ :List[Any] = Extractor def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" lowerCAmelCase__ :Optional[Any] = os.path.abspath(lowerCAmelCase_ ) return os.path.join(self.extract_dir , hash_url_to_filename(lowerCAmelCase_ ) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return force_extract or ( not os.path.isfile(lowerCAmelCase_ ) and not (os.path.isdir(lowerCAmelCase_ ) and os.listdir(lowerCAmelCase_ )) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = self.extractor.infer_extractor_format(lowerCAmelCase_ ) if not extractor_format: return input_path lowerCAmelCase__ :Any = self._get_output_path(lowerCAmelCase_ ) if self._do_extract(lowerCAmelCase_ , lowerCAmelCase_ ): self.extractor.extract(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return output_path class _lowerCAmelCase ( _a ): """simple docstring""" @classmethod @abstractmethod def snake_case ( cls , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' ... @staticmethod @abstractmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' ... 
class _lowerCAmelCase ( _a , _a ): """simple docstring""" __magic_name__ :Optional[Any] = [] @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' with open(lowerCAmelCase_ , 'rb' ) as f: return f.read(lowerCAmelCase_ ) @classmethod def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase = b"" ): '''simple docstring''' if not magic_number: lowerCAmelCase__ :str = max(len(lowerCAmelCase_ ) for cls_magic_number in cls.magic_numbers ) try: lowerCAmelCase__ :int = cls.read_magic_number(lowerCAmelCase_ , lowerCAmelCase_ ) except OSError: return False return any(magic_number.startswith(lowerCAmelCase_ ) for cls_magic_number in cls.magic_numbers ) class _lowerCAmelCase ( _a ): """simple docstring""" @classmethod def snake_case ( cls , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return tarfile.is_tarfile(lowerCAmelCase_ ) @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' def resolved(__UpperCAmelCase ) -> str: return os.path.realpath(os.path.abspath(lowerCAmelCase_ ) ) def badpath(__UpperCAmelCase , __UpperCAmelCase ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ).startswith(lowerCAmelCase_ ) def badlink(__UpperCAmelCase , __UpperCAmelCase ) -> bool: # Links are interpreted relative to the directory containing the link lowerCAmelCase__ :Any = resolved(os.path.join(lowerCAmelCase_ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=lowerCAmelCase_ ) lowerCAmelCase__ :int = resolved(lowerCAmelCase_ ) for finfo in members: if badpath(finfo.name , lowerCAmelCase_ ): logger.error(F"Extraction of {finfo.name} is blocked (illegal path)" ) elif finfo.issym() and badlink(lowerCAmelCase_ , lowerCAmelCase_ ): logger.error(F"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" ) elif finfo.islnk() and badlink(lowerCAmelCase_ , lowerCAmelCase_ ): logger.error(F"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" ) else: yield finfo @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) lowerCAmelCase__ :Any = tarfile.open(lowerCAmelCase_ ) tar_file.extractall(lowerCAmelCase_ , members=TarExtractor.safemembers(lowerCAmelCase_ , lowerCAmelCase_ ) ) tar_file.close() class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :Tuple = [b"""\x1F\x8B"""] @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' with gzip.open(lowerCAmelCase_ , 'rb' ) as gzip_file: with open(lowerCAmelCase_ , 'wb' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ ) class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :Any = [ b"""PK\x03\x04""", b"""PK\x05\x06""", # empty archive b"""PK\x07\x08""", # spanned archive ] @classmethod def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase = b"" ): '''simple docstring''' if super().is_extractable(lowerCAmelCase_ , magic_number=lowerCAmelCase_ ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(lowerCAmelCase_ , 'rb' ) as fp: lowerCAmelCase__ :Union[str, Any] = _EndRecData(lowerCAmelCase_ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: lowerCAmelCase__ :Optional[int] = fp.read(lowerCAmelCase_ ) # CD is where we expect it to be if len(lowerCAmelCase_ ) == sizeCentralDir: lowerCAmelCase__ :Optional[Any] = struct.unpack(lowerCAmelCase_ , lowerCAmelCase_ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) with zipfile.ZipFile(lowerCAmelCase_ , 'r' ) as zip_file: zip_file.extractall(lowerCAmelCase_ ) zip_file.close() class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :Tuple = [b"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' with lzma.open(lowerCAmelCase_ ) as compressed_file: with open(lowerCAmelCase_ , 'wb' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ ) class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :Any = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if not config.RARFILE_AVAILABLE: raise ImportError('Please pip install rarfile' ) import rarfile os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) lowerCAmelCase__ :List[Any] = rarfile.RarFile(lowerCAmelCase_ ) rf.extractall(lowerCAmelCase_ ) rf.close() class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :str = [b"""\x28\xb5\x2F\xFD"""] @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if not config.ZSTANDARD_AVAILABLE: raise ImportError('Please pip install zstandard' ) import zstandard as zstd lowerCAmelCase__ :List[Any] = zstd.ZstdDecompressor() with open(lowerCAmelCase_ , 'rb' ) as ifh, open(lowerCAmelCase_ , 'wb' ) as ofh: dctx.copy_stream(lowerCAmelCase_ , lowerCAmelCase_ ) class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :Optional[int] = [b"""\x42\x5A\x68"""] @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' with bza.open(lowerCAmelCase_ , 'rb' ) as compressed_file: with open(lowerCAmelCase_ , 'wb' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ ) class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :Any = [b"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if not config.PY7ZR_AVAILABLE: raise ImportError('Please pip install py7zr' ) import pyazr os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) with 
pyazr.SevenZipFile(lowerCAmelCase_ , 'r' ) as archive: archive.extractall(lowerCAmelCase_ ) class _lowerCAmelCase ( _a ): """simple docstring""" __magic_name__ :List[str] = [b"""\x04\x22\x4D\x18"""] @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if not config.LZ4_AVAILABLE: raise ImportError('Please pip install lz4' ) import lza.frame with lza.frame.open(lowerCAmelCase_ , 'rb' ) as compressed_file: with open(lowerCAmelCase_ , 'wb' ) as extracted_file: shutil.copyfileobj(lowerCAmelCase_ , lowerCAmelCase_ ) class _lowerCAmelCase : """simple docstring""" __magic_name__ :int = { """tar""": TarExtractor, """gzip""": GzipExtractor, """zip""": ZipExtractor, """xz""": XzExtractor, """rar""": RarExtractor, """zstd""": ZstdExtractor, """bz2""": BzipaExtractor, """7z""": SevenZipExtractor, # <Added version="2.4.0"/> """lz4""": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def snake_case ( cls ): '''simple docstring''' return max( len(lowerCAmelCase_ ) for extractor in cls.extractors.values() if issubclass(lowerCAmelCase_ , lowerCAmelCase_ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def snake_case ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' try: return MagicNumberBaseExtractor.read_magic_number(lowerCAmelCase_ , magic_number_length=lowerCAmelCase_ ) except OSError: return b"" @classmethod def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' warnings.warn( 'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ' 'Use \'infer_extractor_format\' instead.' , category=lowerCAmelCase_ , ) lowerCAmelCase__ :str = cls.infer_extractor_format(lowerCAmelCase_ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def snake_case ( cls , __UpperCAmelCase ): # <Added version="2.4.0"/> '''simple docstring''' lowerCAmelCase__ :List[str] = cls._get_magic_number_max_length() lowerCAmelCase__ :Tuple = cls._read_magic_number(lowerCAmelCase_ , lowerCAmelCase_ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(lowerCAmelCase_ , magic_number=lowerCAmelCase_ ): return extractor_format @classmethod def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = "deprecated" , ): '''simple docstring''' os.makedirs(os.path.dirname(lowerCAmelCase_ ) , exist_ok=lowerCAmelCase_ ) # Prevent parallel extractions lowerCAmelCase__ :Union[str, Any] = str(Path(lowerCAmelCase_ ).with_suffix('.lock' ) ) with FileLock(lowerCAmelCase_ ): shutil.rmtree(lowerCAmelCase_ , ignore_errors=lowerCAmelCase_ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # passed as positional arg warnings.warn( 'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ' 'Use \'extractor_format\' instead.' , category=lowerCAmelCase_ , ) lowerCAmelCase__ :List[str] = extractor if extractor != 'deprecated' else extractor_format else: lowerCAmelCase__ :Dict = cls.extractors[extractor_format] return extractor.extract(lowerCAmelCase_ , lowerCAmelCase_ ) else: warnings.warn( 'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ' 'exception in 3.0.0.' 
, category=lowerCAmelCase_ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(lowerCAmelCase_ ): return extractor.extract(lowerCAmelCase_ , lowerCAmelCase_ )
93
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case : str = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[str] = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Optional[int] = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Tuple = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : List[Any] = ['LayoutLMv3FeatureExtractor'] _snake_case : Tuple = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys _snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
22
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _UpperCamelCase : Optional[Any] ={ 'configuration_clip': [ 'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPConfig', 'CLIPOnnxConfig', 'CLIPTextConfig', 'CLIPVisionConfig', ], 'processing_clip': ['CLIPProcessor'], 'tokenization_clip': ['CLIPTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[Any] =['CLIPTokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : int =['CLIPFeatureExtractor'] _UpperCamelCase : int =['CLIPImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Tuple =[ 'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPModel', 'CLIPPreTrainedModel', 'CLIPTextModel', 'CLIPTextModelWithProjection', 'CLIPVisionModel', 'CLIPVisionModelWithProjection', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Optional[Any] =[ 'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFCLIPModel', 'TFCLIPPreTrainedModel', 'TFCLIPTextModel', 'TFCLIPVisionModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Any =[ 'FlaxCLIPModel', 'FlaxCLIPPreTrainedModel', 'FlaxCLIPTextModel', 'FlaxCLIPTextPreTrainedModel', 'FlaxCLIPVisionModel', 'FlaxCLIPVisionPreTrainedModel', ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _UpperCamelCase : List[Any] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class A ( _a ): lowercase_ = (DDPMParallelScheduler,) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> List[Any]: """simple docstring""" _a = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowerCAmelCase_ ) return config def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ): self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Optional[Any]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , ) def __lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=lowerCAmelCase_ ) def __lowerCAmelCase ( self : str ) -> Optional[int]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5 def __lowerCAmelCase ( self : Dict ) -> str: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = self.dummy_sample_deter + 0.1 _a = self.dummy_sample_deter - 0.1 _a = samplea.shape[0] _a = torch.stack([samplea, samplea, samplea] , dim=0 ) _a = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ ) _a = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _a = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1e-2 assert abs(result_mean.item() - 0.5_0_0_5 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" _a = 
self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3 def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config(prediction_type='''v_prediction''' ) _a = scheduler_class(**lowerCAmelCase_ ) _a = len(lowerCAmelCase_ ) _a = self.dummy_model() _a = self.dummy_sample_deter _a = torch.manual_seed(0 ) for t in reversed(range(lowerCAmelCase_ ) ): # 1. predict noise residual _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # 2. predict previous mean of sample x_t-1 _a = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample _a = pred_prev_sample _a = torch.sum(torch.abs(lowerCAmelCase_ ) ) _a = torch.mean(torch.abs(lowerCAmelCase_ ) ) assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2 assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3 def __lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) _a = scheduler.timesteps for i, timestep in enumerate(lowerCAmelCase_ ): if i == len(lowerCAmelCase_ ) - 1: _a = -1 else: _a = timesteps[i + 1] _a = scheduler.previous_timestep(lowerCAmelCase_ ) _a = prev_t.item() self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> List[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 51, 0] with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [1_00, 87, 50, 1, 0] _a = len(lowerCAmelCase_ ) with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" _a = self.scheduler_classes[0] _a = self.get_scheduler_config() _a = scheduler_class(**lowerCAmelCase_ ) _a = [scheduler.config.num_train_timesteps] with self.assertRaises( lowerCAmelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
22
0
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
106
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
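A quick shape sanity-check for the regressor wrapper above, on synthetic data; the 8-column feature matrix mirrors the California housing dataset's 8 features. This snippet is illustrative and not part of the upstream file:

import numpy as np

rng = np.random.default_rng(0)
train_x, train_y = rng.random((100, 8)), rng.random(100)
test_x = rng.random((25, 8))
preds = xgboost(train_x, train_y, test_x)  # assumes the cleaned-up xgboost() above
assert preds.shape == (25, 1)  # one column-vector prediction per test row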
22
0
def solution(n: int = 2_000_000) -> int:
    # Sieve of Eratosthenes: primality_list[i] == 0 means i is (still) considered prime
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
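A minimal check of the sieve-based solution above; the small inputs are easy to verify by hand. This snippet is illustrative and not part of the upstream file:

assert solution(10) == 17  # primes below 10: 2 + 3 + 5 + 7
assert solution(30) == 129  # primes below 30 sum to 129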
243
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
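A sanity sketch of the expected measurement, assuming an ideal (noise-free) simulator: for inputs (1, 1) the XOR bit is 0 and the AND bit is 1, and qiskit renders counts with classical bit 1 on the left, so every shot lands on the bitstring "10". Illustrative, not part of the upstream file:

counts = half_adder(1, 1)
assert counts == {"10": 1000}  # AND=1, XOR=0 for all 1000 shots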
22
0
'''simple docstring''' import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class lowerCAmelCase : def __init__( self : Tuple , a__ : str ): '''simple docstring''' if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden lowerCAmelCase__ : Optional[Any] = deepcopy(lowerCAmelCase_ ) elif os.path.exists(lowerCAmelCase_ ): with io.open(lowerCAmelCase_ , "r" , encoding="utf-8" ) as f: lowerCAmelCase__ : int = json.load(lowerCAmelCase_ ) else: try: lowerCAmelCase__ : Tuple = baseaa.urlsafe_baadecode(lowerCAmelCase_ ).decode("utf-8" ) lowerCAmelCase__ : Optional[Any] = json.loads(lowerCAmelCase_ ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' ) lowerCAmelCase__ : List[Any] = config self.set_stage_and_offload() def _A ( self : int ): '''simple docstring''' lowerCAmelCase__ : List[Any] = self.get_value("zero_optimization.stage" , -1 ) # offload lowerCAmelCase__ : Union[str, Any] = False if self.is_zeroa() or self.is_zeroa(): lowerCAmelCase__ : str = set(["cpu", "nvme"] ) lowerCAmelCase__ : Tuple = set( [ self.get_value("zero_optimization.offload_optimizer.device" ), self.get_value("zero_optimization.offload_param.device" ), ] ) if len(offload_devices & offload_devices_valid ) > 0: lowerCAmelCase__ : int = True def _A ( self : Tuple , a__ : Any ): '''simple docstring''' lowerCAmelCase__ : List[Any] = self.config # find the config node of interest if it exists lowerCAmelCase__ : Any = ds_key_long.split("." ) lowerCAmelCase__ : List[Any] = nodes.pop() for node in nodes: lowerCAmelCase__ : str = config.get(lowerCAmelCase_ ) if config is None: return None, ds_key return config, ds_key def _A ( self : int , a__ : List[Any] , a__ : Optional[Any]=None ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.find_config_node(lowerCAmelCase_ ) if config is None: return default return config.get(lowerCAmelCase_ , lowerCAmelCase_ ) def _A ( self : str , a__ : str , a__ : str=False ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.config # find the config node of interest if it exists lowerCAmelCase__ : Tuple = ds_key_long.split("." 
) for node in nodes: lowerCAmelCase__ : List[Any] = config lowerCAmelCase__ : Optional[int] = config.get(lowerCAmelCase_ ) if config is None: if must_exist: raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''' ) else: return # if found remove it if parent_config is not None: parent_config.pop(lowerCAmelCase_ ) def _A ( self : Optional[Any] , a__ : List[str] ): '''simple docstring''' lowerCAmelCase__ : Any = self.get_value(lowerCAmelCase_ ) return False if value is None else bool(lowerCAmelCase_ ) def _A ( self : List[str] , a__ : Tuple ): '''simple docstring''' lowerCAmelCase__ : Dict = self.get_value(lowerCAmelCase_ ) return False if value is None else not bool(lowerCAmelCase_ ) def _A ( self : Optional[int] ): '''simple docstring''' return self._stage == 2 def _A ( self : Any ): '''simple docstring''' return self._stage == 3 def _A ( self : List[Any] ): '''simple docstring''' return self._offload class lowerCAmelCase : def __init__( self : int , a__ : Any ): '''simple docstring''' lowerCAmelCase__ : str = engine def _A ( self : str , a__ : List[Any] , **a__ : str ): '''simple docstring''' self.engine.backward(lowerCAmelCase_ , **lowerCAmelCase_ ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class lowerCAmelCase ( _a ): def __init__( self : Optional[int] , a__ : Any ): '''simple docstring''' super().__init__(lowerCAmelCase_ , device_placement=lowerCAmelCase_ , scaler=lowerCAmelCase_ ) lowerCAmelCase__ : List[str] = hasattr(self.optimizer , "overflow" ) def _A ( self : int , a__ : Any=None ): '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def _A ( self : Dict ): '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def _A ( self : List[Any] ): '''simple docstring''' if self.__has_overflow__: return self.optimizer.overflow return False class lowerCAmelCase ( _a ): def __init__( self : Dict , a__ : str , a__ : Tuple ): '''simple docstring''' super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def _A ( self : Tuple ): '''simple docstring''' pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class lowerCAmelCase : def __init__( self : Optional[Any] , a__ : Tuple , a__ : Union[str, Any]=0.001 , a__ : Optional[int]=0 , **a__ : Any ): '''simple docstring''' lowerCAmelCase__ : List[str] = params lowerCAmelCase__ : int = lr lowerCAmelCase__ : Union[str, Any] = weight_decay lowerCAmelCase__ : str = kwargs class lowerCAmelCase : def __init__( self : Any , a__ : Union[str, Any] , a__ : List[str]=None , a__ : int=0 , **a__ : Any ): '''simple docstring''' lowerCAmelCase__ : Dict = optimizer lowerCAmelCase__ : Any = total_num_steps lowerCAmelCase__ : str = warmup_num_steps lowerCAmelCase__ : str = kwargs
378
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
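A usage sketch for the digest function above; the expected values are the well-known MD5 of the empty string and of the classic pangram. Illustrative, not part of the upstream file:

assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
assert md5_me(b"The quick brown fox jumps over the lazy dog") == b"9e107d9d372bb6826bd81d3542a419d6"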
22
0
"""simple docstring""" from datetime import datetime import requests def lowercase (_snake_case ) -> int: '''simple docstring''' __UpperCamelCase = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url=" __UpperCamelCase = requests.get(base_url + url ).json()[0]["urls"][0]["src"] return requests.get(_snake_case ).content if __name__ == "__main__": _A = input("Enter Video/IGTV url: ").strip() _A = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4""" with open(file_name, "wb") as fp: fp.write(download_video(url)) print(f"""Done. Video saved to disk as {file_name}.""")
505
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A ( unittest.TestCase ): def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=7 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=18 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : Optional[int]=4_00 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[str]=True , ) -> Optional[Any]: """simple docstring""" _a = size if size is not None else {'''height''': 18, '''width''': 18} _a = parent _a = batch_size _a = num_channels _a = image_size _a = min_resolution _a = max_resolution _a = do_resize _a = size _a = do_normalize def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4], [-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A ( _a ,unittest.TestCase ): lowercase_ = ImageGPTImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self : List[Any] ) -> str: """simple docstring""" _a = ImageGPTImageProcessingTester(self ) @property def __lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self : List[str] ) -> Dict: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase_ , '''clusters''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''size''' ) ) self.assertTrue(hasattr(lowerCAmelCase_ , '''do_normalize''' ) ) def __lowerCAmelCase ( self : List[Any] ) -> List[str]: """simple docstring""" _a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) _a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def __lowerCAmelCase ( self : str ) -> str: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) _a = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , obj[key] ) ) else: self.assertEqual(obj[key] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _a = os.path.join(lowerCAmelCase_ , '''image_processor.json''' ) image_processor_first.to_json_file(lowerCAmelCase_ ) _a = self.image_processing_class.from_json_file(lowerCAmelCase_ ).to_dict() _a = image_processor_first.to_dict() for key, value in 
image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" _a = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowerCAmelCase_ ) _a = self.image_processing_class.from_pretrained(lowerCAmelCase_ ).to_dict() _a = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCAmelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCAmelCase_ ) @unittest.skip('''ImageGPT requires clusters at initialization''' ) def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" pass def snake_case_ (): '''simple docstring''' _a = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' ) _a = Image.open(dataset[4]['''file'''] ) _a = Image.open(dataset[5]['''file'''] ) _a = [imagea, imagea] return images @require_vision @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _a = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' ) _a = prepare_images() # test non-batched _a = image_processing(images[0] , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 10_24) ) _a = [3_06, 1_91, 1_91] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase_ ) # test batched _a = image_processing(lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 10_24) ) _a = [3_03, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase_ )
22
0
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
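A hypothetical usage sketch for the reader above, assuming a local Spark session and a writable cache directory; in the library this path is normally reached through higher-level dataset loading rather than called directly. Illustrative, not part of the upstream file:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("demo").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])  # hypothetical toy DataFrame
ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache", streaming=False).read()
print(ds)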
146
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
22
0
import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ): A : Tuple = StableDiffusionControlNetImgaImgPipeline A : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} A : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} ) A : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCamelCase ( self ): torch.manual_seed(0 ) lowercase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowercase : List[str] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowercase : List[str] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , ) torch.manual_seed(0 ) lowercase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase : int = CLIPTextModel(lowerCAmelCase_ ) lowercase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase : Optional[Any] = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): if str(lowerCAmelCase_ ).startswith('''mps''' ): lowercase : Optional[int] = torch.manual_seed(lowerCAmelCase_ ) else: lowercase : List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) lowercase : Tuple = 2 lowercase : List[str] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , 
generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , ) lowercase : Tuple = floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) lowercase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase : Union[str, Any] = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' ).resize((64, 64) ) lowercase : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __lowerCamelCase ( self ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCamelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCamelCase ( self ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class __SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ): A : List[str] = StableDiffusionControlNetImgaImgPipeline A : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} A : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __lowerCamelCase ( self ): torch.manual_seed(0 ) lowercase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(SCREAMING_SNAKE_CASE__ ): if isinstance(lowerCAmelCase_ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowercase : Union[str, Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(lowerCAmelCase_ ) torch.manual_seed(0 ) lowercase : Union[str, Any] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(lowerCAmelCase_ ) torch.manual_seed(0 ) lowercase : str = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase_ , set_alpha_to_one=lowerCAmelCase_ , ) torch.manual_seed(0 ) lowercase : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase : Optional[int] = CLIPTextModel(lowerCAmelCase_ ) lowercase : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase : Dict = 
MultiControlNetModel([controlneta, controlneta] ) lowercase : Tuple = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): if str(lowerCAmelCase_ ).startswith('''mps''' ): lowercase : List[Any] = torch.manual_seed(lowerCAmelCase_ ) else: lowercase : List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) lowercase : int = 2 lowercase : int = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase_ , device=torch.device(lowerCAmelCase_ ) , ), ] lowercase : Optional[int] = floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ ) lowercase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase : List[str] = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('''RGB''' ).resize((64, 64) ) lowercase : int = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __lowerCamelCase ( self ): lowercase : Dict = self.get_dummy_components() lowercase : int = self.pipeline_class(**lowerCAmelCase_ ) pipe.to(lowerCAmelCase_ ) lowercase : Union[str, Any] = 10.0 lowercase : str = 4 lowercase : str = self.get_dummy_inputs(lowerCAmelCase_ ) lowercase : Tuple = steps lowercase : Tuple = scale lowercase : int = pipe(**lowerCAmelCase_ )[0] lowercase : List[Any] = self.get_dummy_inputs(lowerCAmelCase_ ) lowercase : Tuple = steps lowercase : Optional[Any] = scale lowercase : Dict = pipe(**lowerCAmelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowercase : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ ) lowercase : Optional[int] = steps lowercase : Optional[int] = scale lowercase : int = pipe(**lowerCAmelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowercase : str = self.get_dummy_inputs(lowerCAmelCase_ ) lowercase : Optional[Any] = steps lowercase : List[str] = scale lowercase : int = pipe(**lowerCAmelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def __lowerCamelCase ( self ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCamelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def __lowerCamelCase ( self ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def __lowerCamelCase ( self ): lowercase : int = self.get_dummy_components() lowercase : int = self.pipeline_class(**lowerCAmelCase_ ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) with 
tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(lowerCAmelCase_ ) except NotImplementedError: pass @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): lowercase : Dict = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) lowercase : Optional[Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase_ , controlnet=lowerCAmelCase_ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) lowercase : int = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase : str = '''evil space-punk bird''' lowercase : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) ) lowercase : Tuple = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) ) lowercase : List[str] = pipe( lowerCAmelCase_ , lowerCAmelCase_ , control_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type='''np''' , num_inference_steps=50 , strength=0.6 , ) lowercase : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) lowercase : str = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9E-2
319
'''simple docstring''' from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _snake_case : Optional[Any] = 8 def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Dict=BITS ): '''simple docstring''' _a = x.device _a = (x * 255).int().clamp(0 , 255 ) _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase ) _a = rearrange(UpperCamelCase , '''d -> d 1 1''' ) _a = rearrange(UpperCamelCase , '''b c h w -> b c 1 h w''' ) _a = ((x & mask) != 0).float() _a = rearrange(UpperCamelCase , '''b c d h w -> b (c d) h w''' ) _a = bits * 2 - 1 return bits def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any=BITS ): '''simple docstring''' _a = x.device _a = (x > 0).int() _a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa ) _a = rearrange(UpperCamelCase , '''d -> d 1 1''' ) _a = rearrange(UpperCamelCase , '''b (c d) h w -> b c d h w''' , d=8 ) _a = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' ) return (dec / 255).clamp(0.0 , 1.0 ) def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ): '''simple docstring''' if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _a = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _a = self.alphas_cumprod[timestep] _a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _a = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _a = self.bit_scale if self.config.clip_sample: _a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _a = self._get_variance(UpperCamelCase , UpperCamelCase ) _a = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu''' _a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase ) _a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise _a = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase ) def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ): '''simple docstring''' _a = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 ) else: _a = None # 1. compute alphas, betas _a = self.alphas_cumprod[t] _a = self.alphas_cumprod[t - 1] if t > 0 else self.one _a = 1 - alpha_prod_t _a = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _a = model_output else: raise ValueError(f'Unsupported prediction_type {prediction_type}.' ) # 3. Clip "predicted x_0" _a = self.bit_scale if self.config.clip_sample: _a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _a = 0 if t > 0: _a = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device ) _a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise _a = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase ) class A ( _a ): def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int: """simple docstring""" super().__init__() _a = bit_scale _a = ( ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step ) self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) @torch.no_grad() def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]: """simple docstring""" _a = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , ) _a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale _a = latents.to(self.device ) self.scheduler.set_timesteps(lowerCAmelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample # compute the previous noisy sample x_t -> x_t-1 _a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample _a = bits_to_decimal(lowerCAmelCase_ ) if output_type == "pil": _a = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_ )
22
0
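# The bit-diffusion helpers in the row above pack images into {-1, +1} bit
# planes with einops. A minimal, dependency-free sketch of the same round
# trip (plain torch only; the `decimal_to_bits`/`bits_to_decimal` names match
# the row, everything else here is an assumption, not the library's API):
import torch


def decimal_to_bits(x: torch.Tensor, bits: int = 8) -> torch.Tensor:
    # quantize [0, 1] floats to 8-bit ints, then expand each value into
    # `bits` sign-coded planes in {-1, +1}
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
    planes = ((x.unsqueeze(-1) & mask) != 0).float()  # (..., bits)
    return planes * 2 - 1


def bits_to_decimal(planes: torch.Tensor, bits: int = 8) -> torch.Tensor:
    # invert decimal_to_bits: threshold at 0, weight by powers of two
    weights = 2 ** torch.arange(bits - 1, -1, -1, device=planes.device)
    dec = ((planes > 0).int() * weights).sum(dim=-1)
    return (dec / 255).clamp(0.0, 1.0)


x = torch.rand(2, 3, 4, 4)
assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int().float() / 255)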
import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer SCREAMING_SNAKE_CASE :Any = logging.get_logger(__name__) class __lowerCAmelCase ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 'AutoTokenizer' _SCREAMING_SNAKE_CASE = ['tokenizer'] _SCREAMING_SNAKE_CASE = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any]=None ) -> Dict: """simple docstring""" super().__init__(lowerCAmelCase_ ) snake_case_ = speaker_embeddings @classmethod def lowerCAmelCase__ ( cls : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]="speaker_embeddings_path.json" , **_lowerCAmelCase : Any ) -> str: """simple docstring""" if speaker_embeddings_dict_path is not None: snake_case_ = get_file_from_repo( lowerCAmelCase_ , lowerCAmelCase_ , subfolder=kwargs.pop("subfolder" , lowerCAmelCase_ ) , cache_dir=kwargs.pop("cache_dir" , lowerCAmelCase_ ) , force_download=kwargs.pop("force_download" , lowerCAmelCase_ ) , proxies=kwargs.pop("proxies" , lowerCAmelCase_ ) , resume_download=kwargs.pop("resume_download" , lowerCAmelCase_ ) , local_files_only=kwargs.pop("local_files_only" , lowerCAmelCase_ ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCAmelCase_ ) , revision=kwargs.pop("revision" , lowerCAmelCase_ ) , ) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) snake_case_ = None else: with open(lowerCAmelCase_ ) as speaker_embeddings_json: snake_case_ = json.load(lowerCAmelCase_ ) else: snake_case_ = None snake_case_ = AutoTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) return cls(tokenizer=lowerCAmelCase_ , speaker_embeddings=lowerCAmelCase_ ) def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any]="speaker_embeddings_path.json" , _lowerCAmelCase : Dict="speaker_embeddings" , _lowerCAmelCase : bool = False , **_lowerCAmelCase : List[str] , ) -> str: """simple docstring""" if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ , "v2" ) , exist_ok=lowerCAmelCase_ ) snake_case_ = {} snake_case_ = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": snake_case_ = self._load_voice_preset(lowerCAmelCase_ ) snake_case_ = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["repo_or_path"] , lowerCAmelCase_ , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowerCAmelCase_ , ) snake_case_ = os.path.join(lowerCAmelCase_ , F'''{prompt_key}_{key}.npy''' ) snake_case_ = tmp_dict with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , "w" ) as fp: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) super().save_pretrained(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : str = None , **_lowerCAmelCase : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ = self.speaker_embeddings[voice_preset] snake_case_ = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in 
voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) snake_case_ = get_file_from_repo( self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , lowerCAmelCase_ ) , cache_dir=kwargs.pop("cache_dir" , lowerCAmelCase_ ) , force_download=kwargs.pop("force_download" , lowerCAmelCase_ ) , proxies=kwargs.pop("proxies" , lowerCAmelCase_ ) , resume_download=kwargs.pop("resume_download" , lowerCAmelCase_ ) , local_files_only=kwargs.pop("local_files_only" , lowerCAmelCase_ ) , use_auth_token=kwargs.pop("use_auth_token" , lowerCAmelCase_ ) , revision=kwargs.pop("revision" , lowerCAmelCase_ ) , ) if path is None: raise ValueError( F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.''' ) snake_case_ = np.load(lowerCAmelCase_ ) return voice_preset_dict def lowerCAmelCase__ ( self : int , _lowerCAmelCase : Optional[dict] = None ) -> Any: """simple docstring""" for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self : Union[str, Any] , _lowerCAmelCase : int=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Dict="pt" , _lowerCAmelCase : Dict=2_5_6 , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Union[str, Any]=False , **_lowerCAmelCase : int , ) -> str: """simple docstring""" if voice_preset is not None and not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): snake_case_ = self._load_voice_preset(lowerCAmelCase_ ) else: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not voice_preset.endswith(".npz" ): snake_case_ = voice_preset + ".npz" snake_case_ = np.load(lowerCAmelCase_ ) if voice_preset is not None: self._validate_voice_preset_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) snake_case_ = BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) snake_case_ = self.tokenizer( lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding="max_length" , max_length=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) if voice_preset is not None: snake_case_ = voice_preset return encoded_text
283
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case : Optional[int] = logging.get_logger(__name__) _snake_case : Any = { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class A ( _a ): lowercase_ = 'roformer' def __init__( self : str , lowerCAmelCase_ : int=5_00_00 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=7_68 , lowerCAmelCase_ : Tuple=12 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : List[str]=30_72 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : int=15_36 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Dict=1e-12 , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Tuple=True , **lowerCAmelCase_ : Optional[int] , ) -> str: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) _a = vocab_size _a = hidden_size if embedding_size is None else embedding_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = initializer_range _a = layer_norm_eps _a = rotary_value _a = use_cache class A ( _a ): @property def __lowerCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} _a = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
22
0
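# The Bark-style processor above persists one .npy file per prompt array plus
# a JSON index mapping preset names to those paths. A hedged sketch of that
# save/load scheme (hypothetical helper names, not the BarkProcessor API):
import json
import os

import numpy as np


def save_voice_presets(presets: dict, directory: str, index_name: str = "speaker_embeddings_path.json") -> None:
    os.makedirs(directory, exist_ok=True)
    index = {}
    for name, arrays in presets.items():
        entry = {}
        for key, arr in arrays.items():
            rel = f"{name}_{key}.npy"
            np.save(os.path.join(directory, rel), arr, allow_pickle=False)
            entry[key] = rel
        index[name] = entry
    with open(os.path.join(directory, index_name), "w") as fp:
        json.dump(index, fp)


def load_voice_preset(directory: str, name: str, index_name: str = "speaker_embeddings_path.json") -> dict:
    with open(os.path.join(directory, index_name)) as fp:
        index = json.load(fp)
    return {key: np.load(os.path.join(directory, rel)) for key, rel in index[name].items()}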
import math

import qiskit


def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    # reject string input first; the numeric checks below assume numbers
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
403
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class A : lowercase_ = 42 lowercase_ = 42 class A : def __init__( self : Optional[Any] , lowerCAmelCase_ : int ) -> str: """simple docstring""" _a = [[] for _ in range(lowerCAmelCase_ )] _a = size def __getitem__( self : Any , lowerCAmelCase_ : int ) -> Iterator[Edge]: """simple docstring""" return iter(self._graph[vertex] ) @property def __lowerCAmelCase ( self : str ) -> Tuple: """simple docstring""" return self._size def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Dict: """simple docstring""" if weight not in (0, 1): raise ValueError('''Edge weight must be either 0 or 1.''' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('''Vertex indexes must be in [0; size).''' ) self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> int | None: """simple docstring""" _a = deque([start_vertex] ) _a = [None] * self.size _a = 0 while queue: _a = queue.popleft() _a = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: _a = current_distance + edge.weight _a = distances[edge.destination_vertex] if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and new_distance >= dest_vertex_distance ): continue _a = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('''No path from start_vertex to finish_vertex.''' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
22
0
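# The graph context above is a 0-1 BFS: a deque-based shortest path for
# graphs whose edge weights are only 0 or 1 (0-weight edges go to the front
# of the queue, 1-weight edges to the back). A self-contained sketch:
from collections import deque


def zero_one_bfs(adj: list, start: int, finish: int) -> int:
    # adj[u] is a list of (destination, weight) pairs with weight in {0, 1}
    dist = [None] * len(adj)
    dist[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    queue.appendleft(v)
                else:
                    queue.append(v)
    if dist[finish] is None:
        raise ValueError("No path from start_vertex to finish_vertex.")
    return dist[finish]


assert zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0, 2) == 1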
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
57
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        # base case for half-integers: gamma(1/2) = sqrt(pi)
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
22
0
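# The unit tests above exercise knapsack.greedy_knapsack.calc_profit, which
# is not shown in this row. A hypothetical stand-in consistent with the
# tested behavior (greedy fractional knapsack by profit/weight ratio; the
# actual upstream implementation may differ in detail):
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise IndexError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # take items in decreasing profit/weight order, splitting the last one
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        if w <= capacity:
            total, capacity = total + p, capacity - w
        else:
            total += p * capacity / w
            break
    return total


assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210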
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class _lowerCAmelCase : """simple docstring""" __magic_name__ :Dict = 42 __magic_name__ :int = 42 class _lowerCAmelCase : """simple docstring""" def __init__( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = [[] for _ in range(lowerCAmelCase_ )] lowerCAmelCase__ :str = size def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' return iter(self._graph[vertex] ) @property def snake_case ( self ): '''simple docstring''' return self._size def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(lowerCAmelCase_ , lowerCAmelCase_ ) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = deque([start_vertex] ) lowerCAmelCase__ :Union[str, Any] = [None] * self.size lowerCAmelCase__ :int = 0 while queue: lowerCAmelCase__ :Any = queue.popleft() lowerCAmelCase__ :Dict = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: lowerCAmelCase__ :Any = current_distance + edge.weight lowerCAmelCase__ :int = distances[edge.destination_vertex] if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and new_distance >= dest_vertex_distance ): continue lowerCAmelCase__ :Optional[int] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
93
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : List[Any] ) -> int: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_euler''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe([prompt] , generator=lowerCAmelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def __lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" _a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) _a = sd_pipe.to(lowerCAmelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) sd_pipe.set_scheduler('''sample_dpmpp_2m''' ) _a = '''A painting of a squirrel eating a burger''' _a = torch.manual_seed(0 ) _a = sd_pipe( [prompt] , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=lowerCAmelCase_ , ) _a = output.images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) _a = np.array( [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
22
0
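# The slow pipeline tests in the context above all follow one regression
# pattern: fix the seed, run a short sampling loop, then compare a 3x3 corner
# slice of the output against stored reference values. A tiny helper
# capturing that pattern (hypothetical name; the test suite inlines it):
import numpy as np


def assert_image_slice_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> None:
    # image is channel-last, batch-first: (N, H, W, C)
    actual = image[0, -3:, -3:, -1].flatten()
    max_diff = np.abs(actual - expected_slice).max()
    assert max_diff < atol, f"max abs diff {max_diff} exceeds {atol}"


assert_image_slice_close(np.zeros((1, 512, 512, 3), dtype=np.float32), np.zeros(9, dtype=np.float32))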
__version__ = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
316
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets _snake_case : Any = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' _snake_case : Any = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' _snake_case : List[Any] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def snake_case_ (UpperCamelCase : Tuple ): '''simple docstring''' def remove_articles(UpperCamelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(UpperCamelCase , ''' ''' , UpperCamelCase ) def white_space_fix(UpperCamelCase : Union[str, Any] ): return " ".join(text.split() ) def remove_punc(UpperCamelCase : str ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(UpperCamelCase : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(UpperCamelCase ) ) ) ) def snake_case_ (UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' return int(normalize_answer(UpperCamelCase ) == normalize_answer(UpperCamelCase ) ) def snake_case_ (UpperCamelCase : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' _a = [any(compute_exact(UpperCamelCase , UpperCamelCase ) for ref in refs ) for pred, refs in zip(UpperCamelCase , UpperCamelCase )] return (sum(UpperCamelCase ) / len(UpperCamelCase )) * 100 def snake_case_ (UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(UpperCamelCase ) _a = Counter(UpperCamelCase ) _a = Counter() for sgram, scount in 
sgramcounter.items(): _a = scount * numref _a = Counter(UpperCamelCase ) _a = Counter() for cgram, ccount in cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = keeptmpscorea / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(UpperCamelCase ) > 0: _a = deltmpscorea / len(UpperCamelCase ) # ADDITION _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = set(UpperCamelCase ) & set(UpperCamelCase ) _a = set(UpperCamelCase ) - set(UpperCamelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) if len(UpperCamelCase ) > 0: _a = addtmpscore / len(UpperCamelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def snake_case_ (UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' _a = len(UpperCamelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) ragramslist.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(UpperCamelCase ) for i in range(0 , len(UpperCamelCase ) - 1 ): if i < len(UpperCamelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(UpperCamelCase ) if i < len(UpperCamelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) ((_a) , (_a) , (_a)) = SARIngram(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def snake_case_ (UpperCamelCase : str , UpperCamelCase : bool = True , UpperCamelCase : str = "13a" , UpperCamelCase : bool = True ): '''simple docstring''' if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(UpperCamelCase )()(UpperCamelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(UpperCamelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(UpperCamelCase , return_str=UpperCamelCase , escape=UpperCamelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(UpperCamelCase , return_str=UpperCamelCase ) else: 
_a = sentence if not return_str: _a = normalized_sent.split() return normalized_sent def snake_case_ (UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' if not (len(UpperCamelCase ) == len(UpperCamelCase ) == len(UpperCamelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(UpperCamelCase , UpperCamelCase , UpperCamelCase ): sari_score += SARIsent(normalize(UpperCamelCase ) , normalize(UpperCamelCase ) , [normalize(UpperCamelCase ) for sent in refs] ) _a = sari_score / len(UpperCamelCase ) return 100 * sari_score def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str]="exp" , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=False , ): '''simple docstring''' _a = len(references[0] ) if any(len(UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(UpperCamelCase )] _a = sacrebleu.corpus_bleu( UpperCamelCase , UpperCamelCase , smooth_method=UpperCamelCase , smooth_value=UpperCamelCase , force=UpperCamelCase , lowercase=UpperCamelCase , use_effective_order=UpperCamelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self : Tuple ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any ) -> Dict: """simple docstring""" _a = {} result.update({'''sari''': compute_sari(sources=lowerCAmelCase_ , predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) result.update({'''exact''': compute_em(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )} ) return result
22
0
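# The exact-match half of the wiki_split metric above normalizes both sides
# before comparing. A readable restatement of that normalization pipeline
# (lowercase -> strip punctuation -> drop articles -> collapse whitespace):
import re
import string


def normalize_answer(text: str) -> str:
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def exact_match(predictions: list, references: list) -> float:
    # percentage of predictions matching at least one reference, post-normalization
    hits = [
        any(normalize_answer(pred) == normalize_answer(ref) for ref in refs)
        for pred, refs in zip(predictions, references)
    ]
    return 100.0 * sum(hits) / len(hits)


assert exact_match(["The cat sat."], [["the cat sat"]]) == 100.0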
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCAmelCase__ ( _a ): A_ : int = 'char' A_ : Dict = 'bpe' A_ : Any = 'wp' __snake_case :Optional[int] =(DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCAmelCase__ ( _a ): A_ : Optional[int] = ['image_processor', 'char_tokenizer'] A_ : Optional[Any] = 'ViTImageProcessor' A_ : Tuple = 'MgpstrTokenizer' def __init__( self : Dict , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : str=None , **__UpperCamelCase : List[Any] ) -> Optional[Any]: A = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCAmelCase_ , ) A = kwargs.pop('feature_extractor' ) A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) A = tokenizer A = AutoTokenizer.from_pretrained('gpt2' ) A = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) def __call__( self : str , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , **__UpperCamelCase : Tuple ) -> Optional[Any]: if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: A = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None: A = self.char_tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is None: return inputs elif images is None: return encodings else: A = encodings['input_ids'] return inputs def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Optional[Any] ) -> List[Any]: A , A , A = sequences A = char_preds.size(0 ) A , A = self._decode_helper(lowerCAmelCase_ , 'char' ) A , A = self._decode_helper(lowerCAmelCase_ , 'bpe' ) A , A = self._decode_helper(lowerCAmelCase_ , 'wp' ) A = [] A = [] for i in range(lowerCAmelCase_ ): A = [char_scores[i], bpe_scores[i], wp_scores[i]] A = [char_strs[i], bpe_strs[i], wp_strs[i]] A = scores.index(max(lowerCAmelCase_ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) A = {} A = final_strs A = final_scores A = char_strs A = bpe_strs A = wp_strs return out def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] ) -> Dict: if format == DecodeType.CHARACTER: A = self.char_decode A = 1 A = '[s]' elif format == DecodeType.BPE: A = self.bpe_decode A = 2 A = '#' elif format == DecodeType.WORDPIECE: A = self.wp_decode A = 102 A = '[SEP]' else: raise ValueError(f'''Format {format} is not supported.''' ) A , A = [], [] A = pred_logits.size(0 ) A = pred_logits.size(1 ) A , A = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase_ , sorted=lowerCAmelCase_ ) A = preds_index.view(-1 , lowerCAmelCase_ )[:, 1:] A = decoder(lowerCAmelCase_ ) A , A = torch.nn.functional.softmax(lowerCAmelCase_ , dim=2 ).max(dim=2 ) A = preds_max_prob[:, 1:] for index in range(lowerCAmelCase_ ): A = preds_str[index].find(lowerCAmelCase_ ) A = preds_str[index][:pred_eos] A = 
preds_index[index].cpu().tolist() A = pred_index.index(lowerCAmelCase_ ) if eos_token in pred_index else -1 A = preds_max_prob[index][: pred_eos_index + 1] A = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCAmelCase_ ) conf_scores.append(lowerCAmelCase_ ) return dec_strs, conf_scores def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[int] ) -> Union[str, Any]: A = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase_ )] return decode_strs def __UpperCamelCase ( self : List[Any] , __UpperCamelCase : Optional[Any] ) -> Tuple: return self.bpe_tokenizer.batch_decode(lowerCAmelCase_ ) def __UpperCamelCase ( self : List[str] , __UpperCamelCase : Dict ) -> Optional[Any]: A = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase_ )] return decode_strs
106
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    # convert a torch image batch in [-1, 1] to a list of PIL images
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


def numpy_to_pil(images):
    # convert a numpy image batch in [0, 1] to a list of PIL images
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
22
0
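# The MGP-STR processor row above decodes with three heads (character, BPE,
# wordpiece) and keeps, per sample, the string whose head reports the highest
# cumulative confidence. A stripped-down sketch of that selection step
# (hypothetical signature; the real method also runs the decoders first):
def ensemble_decode(char: tuple, bpe: tuple, wp: tuple) -> tuple:
    # each argument is a (strings, scores) pair from one decoding head
    final_strs, final_scores = [], []
    for i in range(len(char[0])):
        scores = [char[1][i], bpe[1][i], wp[1][i]]
        strs = [char[0][i], bpe[0][i], wp[0][i]]
        best = scores.index(max(scores))
        final_strs.append(strs[best])
        final_scores.append(scores[best])
    return final_strs, final_scores


strs, scores = ensemble_decode((["cat"], [0.9]), (["cot"], [0.7]), (["c@t"], [0.2]))
assert strs == ["cat"] and scores == [0.9]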
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # all primes above 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
243
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
22
0
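# A defensive variant of the webhook sender above: requests.post accepts a
# timeout, and raise_for_status() turns any non-2xx reply into an exception,
# so a hung or failed Slack call cannot pass silently. (Sketch only; the
# explicit status-code check used above is equally valid.)
import requests


def send_slack_message_safe(message_body: str, slack_url: str, timeout: float = 10.0) -> None:
    response = requests.post(
        slack_url,
        json={"text": message_body},
        headers={"Content-Type": "application/json"},
        timeout=timeout,
    )
    response.raise_for_status()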
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
378
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch _snake_case : Tuple = logging.get_logger(__name__) class A ( _a ): lowercase_ = ['pixel_values'] def __init__( self : str , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_55 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase_ : Any , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_ ) _a = size if size is not None else {'''shortest_edge''': 2_56} _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(lowerCAmelCase_ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[Any] , ) -> np.ndarray: """simple docstring""" _a = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}' ) return center_crop(lowerCAmelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple ) -> np.ndarray: """simple docstring""" return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray: """simple docstring""" return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(lowerCAmelCase_ , param_name='''crop_size''' ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_a = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: _a = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: _a = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: _a = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: _a = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] _a = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Tuple] = None ) -> Any: """simple docstring""" _a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase_ ): _a = target_sizes.numpy() _a = [] for idx in range(len(lowerCAmelCase_ ) ): _a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase_ ) _a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase_ ) else: _a = logits.argmax(dim=1 ) _a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
22
0
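A minimal NumPy sketch of the rescale and normalize arithmetic that the image processor above applies after resizing and cropping. The mean/std values below are assumed stand-ins for IMAGENET_STANDARD_MEAN/IMAGENET_STANDARD_STD, not taken from the sample itself.

import numpy as np

# Assumed stand-ins for IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD.
MEAN = np.array([0.5, 0.5, 0.5])
STD = np.array([0.5, 0.5, 0.5])

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
image = image * (1 / 255)                # do_rescale with rescale_factor = 1/255
image = (image - MEAN) / STD             # do_normalize with per-channel stats
pixel_values = image.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST
print(pixel_values.shape)                # (3, 224, 224)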
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCAmelCase ( _a , unittest.TestCase ): """simple docstring""" _snake_case : int = LEDTokenizer _snake_case : Any = LEDTokenizerFast _snake_case : Any = True def A ( self : int )-> List[Any]: super().setUp() __UpperCamelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] __UpperCamelCase = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) __UpperCamelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __UpperCamelCase = {"unk_token": "<unk>"} __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) __UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase_ ) ) def A ( self : Union[str, Any] , **A_ : int )-> Optional[int]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def A ( self : Optional[Any] , **A_ : Any )-> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def A ( self : Optional[Any] , A_ : Dict )-> List[str]: return "lower newer", "lower newer" @cached_property def A ( self : Dict )-> int: return LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def A ( self : List[str] )-> Union[str, Any]: return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def A ( self : int )-> Tuple: __UpperCamelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] __UpperCamelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCamelCase = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors="pt" ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def A ( self : Tuple )-> List[Any]: __UpperCamelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCamelCase = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="pt" ) self.assertIn("input_ids" , lowerCAmelCase_ ) self.assertIn("attention_mask" , lowerCAmelCase_ ) self.assertNotIn("labels" , lowerCAmelCase_ ) self.assertNotIn("decoder_attention_mask" , lowerCAmelCase_ ) @require_torch def A ( self : List[str] )-> str: __UpperCamelCase = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCamelCase = tokenizer(text_target=lowerCAmelCase_ , 
max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def A ( self : Any )-> Union[str, Any]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCamelCase = tokenizer( ["I am a small frog" * 10_24, "I am a small frog"] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="pt" ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def A ( self : Optional[Any] )-> Union[str, Any]: __UpperCamelCase = ["A long paragraph for summarization."] __UpperCamelCase = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCamelCase = tokenizer(lowerCAmelCase_ , return_tensors="pt" ) __UpperCamelCase = tokenizer(text_target=lowerCAmelCase_ , return_tensors="pt" ) __UpperCamelCase = inputs["input_ids"] __UpperCamelCase = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def A ( self : Any )-> Union[str, Any]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCamelCase = ["Summary of the text.", "Another summary."] __UpperCamelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCamelCase = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) __UpperCamelCase = [[0] * len(lowerCAmelCase_ ) for x in encoded_output["input_ids"]] __UpperCamelCase = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs["global_attention_mask"] , lowerCAmelCase_ ) def A ( self : Any )-> Dict: pass def A ( self : Any )-> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) __UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) __UpperCamelCase = "A, <mask> AllenNLP sentence." __UpperCamelCase = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) __UpperCamelCase = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) __UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) __UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowerCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
505
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def snake_case_ (UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int , UpperCamelCase : Optional[int] = None , ): '''simple docstring''' _a = {} if train_file is not None: _a = [train_file] if eval_file is not None: _a = [eval_file] if test_file is not None: _a = [test_file] _a = datasets.load_dataset('''csv''' , data_files=UpperCamelCase ) _a = list(ds[list(files.keys() )[0]].features.keys() ) _a = features_name.pop(UpperCamelCase ) _a = list(set(ds[list(files.keys() )[0]][label_name] ) ) _a = {label: i for i, label in enumerate(UpperCamelCase )} _a = tokenizer.model_input_names _a = {} if len(UpperCamelCase ) == 1: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' ) , batched=UpperCamelCase , ) elif len(UpperCamelCase ) == 2: for k in files.keys(): _a = ds[k].map( lambda UpperCamelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase , max_length=UpperCamelCase , padding='''max_length''' , ) , batched=UpperCamelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _a = ( tf.data.Dataset.from_generator( UpperCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid _snake_case : str = logging.getLogger(__name__) @dataclass class A : lowercase_ = field(metadata={'help': 'Which column contains the label'} ) lowercase_ = field(default=_a 
,metadata={'help': 'The path of the training file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the development file'} ) lowercase_ = field(default=_a ,metadata={'help': 'The path of the test file'} ) lowercase_ = field( default=128 ,metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } ,) lowercase_ = field( default=_a ,metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A : lowercase_ = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) lowercase_ = field( default=_a ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) lowercase_ = field(default=_a ,metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. lowercase_ = field( default=_a ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,) def snake_case_ (): '''simple docstring''' _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _a , _a , _a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _a , _a , _a , _a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase ) , labelaid=UpperCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict: _a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _a = TFTrainer( model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _a = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) _a = trainer.evaluate() _a = os.path.join(training_args.output_dir , '''eval_results.txt''' ) with open(UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) results.update(UpperCamelCase ) return results if __name__ == "__main__": main()
22
0
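A hedged sketch of the generator-to-tf.data.Dataset pattern that get_tfds builds for each split in the training script above; the feature names, token values, and dtypes here are toy assumptions rather than the script's actual tokenizer output.

import tensorflow as tf

input_names = ["input_ids", "attention_mask"]  # assumed tokenizer input names

def gen():
    for _ in range(4):
        yield {k: [101, 2023, 102] for k in input_names}, 0  # (features, label)

ds = tf.data.Dataset.from_generator(
    gen,
    ({k: tf.int32 for k in input_names}, tf.int64),
    ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
)
for features, label in ds.take(1):
    print(features["input_ids"].numpy(), label.numpy())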
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = { 'configuration_mobilebert': [ 'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileBertConfig', 'MobileBertOnnxConfig', ], 'tokenization_mobilebert': ['MobileBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['MobileBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ 'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileBertForMaskedLM', 'MobileBertForMultipleChoice', 'MobileBertForNextSentencePrediction', 'MobileBertForPreTraining', 'MobileBertForQuestionAnswering', 'MobileBertForSequenceClassification', 'MobileBertForTokenClassification', 'MobileBertLayer', 'MobileBertModel', 'MobileBertPreTrainedModel', 'load_tf_weights_in_mobilebert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ 'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileBertForMaskedLM', 'TFMobileBertForMultipleChoice', 'TFMobileBertForNextSentencePrediction', 'TFMobileBertForPreTraining', 'TFMobileBertForQuestionAnswering', 'TFMobileBertForSequenceClassification', 'TFMobileBertForTokenClassification', 'TFMobileBertMainLayer', 'TFMobileBertModel', 'TFMobileBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
146
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A ( _a ,unittest.TestCase ): lowercase_ = LEDTokenizer lowercase_ = LEDTokenizerFast lowercase_ = True def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" super().setUp() _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase_ ) ) def __lowerCAmelCase ( self : Union[str, Any] , **lowerCAmelCase_ : int ) -> Optional[int]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase_ : Any ) -> int: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> List[str]: """simple docstring""" return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def __lowerCAmelCase ( self : int ) -> Tuple: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : Tuple ) -> List[Any]: """simple docstring""" _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , lowerCAmelCase_ ) self.assertIn('''attention_mask''' , lowerCAmelCase_ ) self.assertNotIn('''labels''' , lowerCAmelCase_ ) 
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase_ ) @require_torch def __lowerCAmelCase ( self : List[str] ) -> str: """simple docstring""" _a = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer( ['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = ['''A long paragraph for summarization.'''] _a = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(lowerCAmelCase_ , return_tensors='''pt''' ) _a = tokenizer(text_target=lowerCAmelCase_ , return_tensors='''pt''' ) _a = inputs['''input_ids'''] _a = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = ['''Summary of the text.''', '''Another summary.'''] _a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _a = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ ) _a = [[0] * len(lowerCAmelCase_ ) for x in encoded_output['''input_ids''']] _a = tokenizer.pad(lowerCAmelCase_ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Any ) -> Dict: """simple docstring""" pass def __lowerCAmelCase ( self : Any ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _a = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = '''A, <mask> AllenNLP sentence.''' _a = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) _a = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) 
self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowerCAmelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
22
0
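A simplified stand-in for the lazy-import idea behind _LazyModule above (an illustrative reimplementation, not the transformers class): exported names resolve to real imports only on first attribute access, so optional backends are never loaded eagerly.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the module that actually defines it.
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        # Called only when normal lookup fails, i.e. on first access to `name`.
        module = importlib.import_module(self._name_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)  # cache so __getattr__ runs once per name
        return value

lazy_math = LazyModule("lazy_math", {"math": ["sqrt", "pi"]})
print(lazy_math.sqrt(9.0))  # the `math` module is imported only here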
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data):
    """Split the fetched dataset bunch into features and regression targets."""
    return (data["data"], data["target"])


def xgboost(features, target, test_features):
    """Fit a deterministic XGBoost regressor and predict on held-out features."""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main():
    """Train on 75% of the California housing data and report test-set errors."""
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
319
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    # the YAML dump has one entry per split
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # "dataset_name" is kept (even if None) when converting a SplitDict to a plain dict
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
22
0
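A quick self-contained check of the two error metrics the XGBoost example reports, using assumed toy values so the expected outputs can be verified by hand.

from sklearn.metrics import mean_absolute_error, mean_squared_error

y_true = [3.0, -0.5, 2.0, 7.0]
y_pred = [2.5, 0.0, 2.0, 8.0]
print(mean_absolute_error(y_true, y_pred))  # 0.5   = (0.5 + 0.5 + 0 + 1) / 4
print(mean_squared_error(y_true, y_pred))   # 0.375 = (0.25 + 0.25 + 0 + 1) / 4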
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler SCREAMING_SNAKE_CASE :List[Any] = 16 SCREAMING_SNAKE_CASE :Tuple = 32 def _lowerCAmelCase ( lowerCAmelCase_ :Accelerator , lowerCAmelCase_ :int = 16 , lowerCAmelCase_ :str = "bert-base-cased" )->int: '''simple docstring''' snake_case_ = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) snake_case_ = load_dataset("glue" , "mrpc" ) def tokenize_function(lowerCAmelCase_ :Optional[Any] ): # max_length=None => use the model max length (it's actually the default) snake_case_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case_ = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=lowerCAmelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(lowerCAmelCase_ :Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase_ , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(lowerCAmelCase_ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. 
snake_case_ = DataLoader( tokenized_datasets["train"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) snake_case_ = DataLoader( tokenized_datasets["validation"] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader def _lowerCAmelCase ( lowerCAmelCase_ :Optional[int] , lowerCAmelCase_ :Optional[int] )->Tuple: '''simple docstring''' snake_case_ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ = config["lr"] snake_case_ = int(config["num_epochs"] ) snake_case_ = int(config["seed"] ) snake_case_ = int(config["batch_size"] ) snake_case_ = args.model_name_or_path set_seed(lowerCAmelCase_ ) snake_case_ , snake_case_ = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) # Instantiate optimizer snake_case_ = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case_ = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase_ ) if accelerator.state.deepspeed_plugin is not None: snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[ "gradient_accumulation_steps" ] else: snake_case_ = 1 snake_case_ = (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case_ = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase_ , ) else: snake_case_ = DummyScheduler(lowerCAmelCase_ , total_num_steps=lowerCAmelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # We need to keep track of how many total steps we have iterated over snake_case_ = 0 # We also need to keep track of the stating epoch so files are named properly snake_case_ = 0 # Now we train the model snake_case_ = evaluate.load("glue" , "mrpc" ) snake_case_ = 0 snake_case_ = {} for epoch in range(lowerCAmelCase_ , lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): snake_case_ = model(**lowerCAmelCase_ ) snake_case_ = outputs.loss snake_case_ = loss / gradient_accumulation_steps accelerator.backward(lowerCAmelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() snake_case_ = 0 for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): snake_case_ = model(**lowerCAmelCase_ ) snake_case_ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case_ , snake_case_ = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowerCAmelCase_ ) - 1: snake_case_ = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case_ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) snake_case_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase_ ) snake_case_ = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: snake_case_ = eval_metric["accuracy"] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}''' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) def _lowerCAmelCase ( )->str: '''simple docstring''' snake_case_ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." ) parser.add_argument( "--model_name_or_path" , type=lowerCAmelCase_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowerCAmelCase_ , ) parser.add_argument( "--output_dir" , type=lowerCAmelCase_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , ) parser.add_argument( "--performance_lower_bound" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , ) parser.add_argument( "--num_epochs" , type=lowerCAmelCase_ , default=3 , help="Number of train epochs." , ) snake_case_ = parser.parse_args() snake_case_ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
283
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black _snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _snake_case : List[str] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class A ( unittest.TestCase ): def __lowerCAmelCase ( self : int ) -> List[Any]: """simple docstring""" _a = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) _a = self.diffusers_dir shutil.copy( os.path.join(lowerCAmelCase_ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCAmelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=None ) -> Union[str, Any]: """simple docstring""" _a = comment + F'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: _a = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result _a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) _a = black.format_str(lowerCAmelCase_ , mode=lowerCAmelCase_ ) _a = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(lowerCAmelCase_ , '''w''' , newline='''\n''' ) as f: f.write(lowerCAmelCase_ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase_ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase_ ) with open(lowerCAmelCase_ , '''r''' ) as f: self.assertTrue(f.read() , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , lowerCAmelCase_ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , lowerCAmelCase_ ) , ) # Copy consistency with a 
really long name
        _a = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}',
            F'{long_class_name}SchedulerOutput',
            re.sub('''Bert''', lowerCAmelCase_, lowerCAmelCase_),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''',
            '''TestSchedulerOutput''',
            lowerCAmelCase_,
            overwrite_result=re.sub('''DDPM''', '''Test''', lowerCAmelCase_),
        )
22
0
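A minimal PyTorch sketch of the gradient-accumulation pattern in the training loop above, with a toy model and random data instead of Accelerate/DeepSpeed; unlike the script, it steps at the end of each accumulation window rather than at step 0.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
gradient_accumulation_steps = 2

for step in range(8):
    batch = torch.randn(16, 4)
    loss = model(batch).pow(2).mean()
    # Scale the loss so accumulated gradients match one large-batch step.
    (loss / gradient_accumulation_steps).backward()
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()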