code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" lowercase__ = """Tobias Carryer""" from time import time class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase=int(time() ) ): # noqa: B008 _lowerCamelCase : Optional[int] = multiplier _lowerCamelCase : Dict = increment _lowerCamelCase : Optional[Any] = modulo _lowerCamelCase : List[str] = seed def A_ ( self ): _lowerCamelCase : Tuple = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. lowercase__ = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31) while True: print(lcg.next_number())
362
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """philschmid/bart-large-cnn-samsum""" lowerCamelCase__ = ( """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """ """and returns a summary of the text.""" ) lowerCamelCase__ = """summarizer""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = ["""text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase ): return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase )[0] def A_ ( self , lowercase ): return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
12
0
"""simple docstring""" def _snake_case ( lowercase__ ): if not all(x.isalpha() for x in string ): raise ValueError('String must only contain alphabetic characters.' ) _lowerCamelCase : Union[str, Any] = sorted(string.lower() ) return len(SCREAMING_SNAKE_CASE_ ) == len(set(SCREAMING_SNAKE_CASE_ ) ) if __name__ == "__main__": lowercase__ = input("""Enter a string """).strip() lowercase__ = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
363
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) ) _lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : int = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class lowerCAmelCase__ ( a__ ): '''simple docstring''' lowerCamelCase__ = "xlnet" lowerCamelCase__ = ["mems"] lowerCamelCase__ = { "n_token": "vocab_size", # Backward compatibility "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , lowercase=32000 , lowercase=1024 , lowercase=24 , lowercase=16 , lowercase=4096 , lowercase="gelu" , lowercase=True , lowercase="bi" , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase=512 , lowercase=None , lowercase=True , lowercase=False , lowercase=False , lowercase=-1 , lowercase=False , lowercase="last" , lowercase=True , lowercase="tanh" , lowercase=0.1 , lowercase=5 , lowercase=5 , lowercase=5 , lowercase=1 , lowercase=2 , **lowercase , ): _lowerCamelCase : Union[str, Any] = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : Union[str, Any] = n_layer _lowerCamelCase : Optional[int] = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _lowerCamelCase : Tuple = d_model // n_head _lowerCamelCase : Tuple = ff_activation _lowerCamelCase : str = d_inner _lowerCamelCase : List[str] = untie_r _lowerCamelCase : Optional[Any] = attn_type _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Any = dropout _lowerCamelCase : Optional[int] = mem_len _lowerCamelCase : 
Optional[int] = reuse_len _lowerCamelCase : List[Any] = bi_data _lowerCamelCase : List[str] = clamp_len _lowerCamelCase : Tuple = same_length _lowerCamelCase : List[str] = summary_type _lowerCamelCase : List[Any] = summary_use_proj _lowerCamelCase : List[str] = summary_activation _lowerCamelCase : Any = summary_last_dropout _lowerCamelCase : Any = start_n_top _lowerCamelCase : List[str] = end_n_top _lowerCamelCase : str = bos_token_id _lowerCamelCase : Optional[Any] = pad_token_id _lowerCamelCase : Optional[int] = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , SCREAMING_SNAKE_CASE_ , ) _lowerCamelCase : Union[str, Any] = kwargs['use_cache'] _lowerCamelCase : Tuple = use_mems_eval _lowerCamelCase : int = use_mems_train super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def A_ ( self ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def A_ ( self , lowercase ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
364
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowercase__ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) lowercase__ = [] lowercase__ = [] lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowercase__ = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", """emoji""": True, }, } ] lowercase__ = 0 for log in Path().glob("""*.log"""): lowercase__ = 0 with open(log, """r""") as f: for line in f: lowercase__ = json.loads(line) if line.get("""nodeid""", """""") != "": lowercase__ = line["""nodeid"""] if line.get("""duration""", None) is not None: lowercase__ = F"{line['duration']:.4f}" if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowercase__ = [] log.unlink() lowercase__ = """""" lowercase__ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowercase__ = [] lowercase__ = {} for test in failed_tests: lowercase__ = test[0].split("""::""") lowercase__ = data[0].split("""/""")[-1] if data[0] not in filesafailed: lowercase__ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowercase__ = [test[0] for test in failed_table] lowercase__ = list(set(files)) # Count number of instances in failed_tests lowercase__ = [] for file in 
individual_files: table.append([file, len(filesafailed[file])]) lowercase__ = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: lowercase__ = """Too many failed tests, please see the full report in the Action results.""" lowercase__ = len(err) + 10 lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}" print(F"### {message}") else: lowercase__ = """No failed tests! 🤗""" print(F"## {message}") payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 🤗": lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) lowercase__ = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) lowercase__ = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowercase__ = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowercase__ = row[0] else: lowercase__ = """""" lowercase__ = { 
"""type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
12
0
"""simple docstring""" lowercase__ = 8.314462 # Unit - J mol-1 K-1 def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if moles < 0 or kelvin < 0 or volume < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
365
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """AutoTokenizer""" lowerCamelCase__ = ["""tokenizer"""] lowerCamelCase__ = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , lowercase , lowercase=None ): super().__init__(lowercase ) _lowerCamelCase : Optional[int] = speaker_embeddings @classmethod def A_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: _lowerCamelCase : Optional[Any] = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(lowercase , lowercase )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) _lowerCamelCase : List[Any] = None else: with open(lowercase ) as speaker_embeddings_json: _lowerCamelCase : Union[str, Any] = json.load(lowercase ) else: _lowerCamelCase : Tuple = None _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def A_ 
( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase ) _lowerCamelCase : int = {} _lowerCamelCase : List[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase ) _lowerCamelCase : Any = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , ) _lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' ) _lowerCamelCase : Optional[Any] = tmp_dict with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def A_ ( self , lowercase = None , **lowercase ): _lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset] _lowerCamelCase : Any = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) _lowerCamelCase : Union[str, Any] = get_file_from_repo( self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if path is None: raise ValueError( 
F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) _lowerCamelCase : List[str] = np.load(lowercase ) return voice_preset_dict def A_ ( self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _lowerCamelCase : Any = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith('.npz' ): _lowerCamelCase : Optional[Any] = voice_preset + '.npz' _lowerCamelCase : Union[str, Any] = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) _lowerCamelCase : Tuple = BatchFeature(data=lowercase , tensor_type=lowercase ) _lowerCamelCase : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: _lowerCamelCase : Optional[int] = voice_preset return encoded_text
12
0
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' lowerCamelCase__ = """""" lowerCamelCase__ = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self , lowercase = None , lowercase = None , **lowercase , ): super().__init__(self , **__lowercase ) _lowerCamelCase : Tuple = repo_info _lowerCamelCase : Dict = token _lowerCamelCase : Optional[Any] = None def A_ ( self ): if self.dir_cache is None: _lowerCamelCase : int = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _lowerCamelCase : Any = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(__lowercase ): {'name': str(__lowercase ), 'size': None, 'type': 'directory'} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def A_ ( self , lowercase , lowercase = "rb" , **lowercase , ): if not isinstance(self.repo_info , __lowercase ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) _lowerCamelCase : str = hf_hub_url(self.repo_info.id , __lowercase , revision=self.repo_info.sha ) return fsspec.open( __lowercase , mode=__lowercase , headers=get_authentication_headers_for_url(__lowercase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open() def A_ ( self , lowercase , **lowercase ): self._get_dirs() _lowerCamelCase : Optional[Any] = self._strip_protocol(__lowercase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__lowercase ) def A_ ( self , lowercase , lowercase=False , **lowercase ): self._get_dirs() _lowerCamelCase : Tuple = PurePosixPath(path.strip('/' ) ) _lowerCamelCase : Optional[int] = {} for p, f in 
self.dir_cache.items(): _lowerCamelCase : Any = PurePosixPath(p.strip('/' ) ) _lowerCamelCase : List[str] = p.parent if root == path: _lowerCamelCase : Union[str, Any] = f _lowerCamelCase : Tuple = list(paths.values() ) if detail: return out else: return sorted(f['name'] for f in out )
366
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device lowercase__ = False class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCamelCase : Dict = torch.manual_seed(0 ) _lowerCamelCase : Dict = pipe( image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _lowerCamelCase : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
12
0
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): while second != 0: _lowerCamelCase : int = first & second first ^= second _lowerCamelCase : Any = c << 1 return first if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = int(input("""Enter the first number: """).strip()) lowercase__ = int(input("""Enter the second number: """).strip()) print(F"{add(first, second) = }")
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType lowercase__ = None lowercase__ = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image lowercase__ = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = True lowerCamelCase__ = None # Automatically constructed lowerCamelCase__ = "PIL.Image.Image" lowerCamelCase__ = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} ) lowerCamelCase__ = field(default="""Image""", init=_UpperCamelCase, repr=_UpperCamelCase ) def __call__( self ): return self.pa_type def A_ ( self , lowercase ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' 
) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _lowerCamelCase : Optional[Any] = np.array(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): return {"path": value, "bytes": None} elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): return {"path": None, "bytes": value} elif isinstance(_UpperCAmelCase , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(_UpperCAmelCase ) elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( F'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def A_ ( self , lowercase , lowercase=None ): if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support decoding images, please install \'Pillow\'.' 
) if token_per_repo_id is None: _lowerCamelCase : Dict = {} _lowerCamelCase : Tuple = value['path'], value['bytes'] if bytes_ is None: if path is None: raise ValueError(F'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(_UpperCAmelCase ): _lowerCamelCase : Optional[int] = PIL.Image.open(_UpperCAmelCase ) else: _lowerCamelCase : Union[str, Any] = path.split('::' )[-1] try: _lowerCamelCase : Tuple = string_to_dict(_UpperCAmelCase , config.HUB_DATASETS_URL )['repo_id'] _lowerCamelCase : Union[str, Any] = token_per_repo_id.get(_UpperCAmelCase ) except ValueError: _lowerCamelCase : List[Any] = None with xopen(_UpperCAmelCase , 'rb' , use_auth_token=_UpperCAmelCase ) as f: _lowerCamelCase : str = BytesIO(f.read() ) _lowerCamelCase : List[Any] = PIL.Image.open(bytes_ ) else: _lowerCamelCase : str = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def A_ ( self ): from .features import Value return ( self if self.decode else { "bytes": Value('binary' ), "path": Value('string' ), } ) def A_ ( self , lowercase ): if pa.types.is_string(storage.type ): _lowerCamelCase : List[str] = pa.array([None] * len(_UpperCAmelCase ) , type=pa.binary() ) _lowerCamelCase : Any = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _lowerCamelCase : List[Any] = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() ) _lowerCamelCase : int = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: _lowerCamelCase : List[Any] = storage.field('bytes' ) else: _lowerCamelCase : List[Any] = pa.array([None] * len(_UpperCAmelCase ) , type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: _lowerCamelCase : Union[str, Any] = storage.field('path' ) else: _lowerCamelCase 
: Tuple = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() ) _lowerCamelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _lowerCamelCase : Tuple = pa.array( [encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) _lowerCamelCase : str = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() ) _lowerCamelCase : int = pa.StructArray.from_arrays( [bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() ) return array_cast(_UpperCAmelCase , self.pa_type ) def A_ ( self , lowercase ): @no_op_if_value_is_null def path_to_bytes(lowercase ): with xopen(_UpperCAmelCase , 'rb' ) as f: _lowerCamelCase : Optional[int] = f.read() return bytes_ _lowerCamelCase : List[str] = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _lowerCamelCase : Optional[Any] = pa.array( [os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , ) _lowerCamelCase : str = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() ) return array_cast(_UpperCAmelCase , self.pa_type ) def _snake_case ( ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' 
) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _lowerCamelCase : Union[str, Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = BytesIO() if image.format in list_image_compression_formats(): _lowerCamelCase : str = image.format else: _lowerCamelCase : Optional[Any] = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF' image.save(lowerCAmelCase_ , format=lowerCAmelCase_ ) return buffer.getvalue() def _snake_case ( lowercase__ ): if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def _snake_case ( lowercase__ ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) _lowerCamelCase : str = array.dtype _lowerCamelCase : List[Any] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER _lowerCamelCase : List[str] = dtype.kind _lowerCamelCase : str = dtype.itemsize _lowerCamelCase : Dict = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _lowerCamelCase : Optional[Any] = np.dtype('|u1' ) if dtype_kind not in ["u", "i"]: raise TypeError( f'''Unsupported array dtype {dtype} for image encoding. 
Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _lowerCamelCase : Optional[int] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _lowerCamelCase : Dict = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ ) _lowerCamelCase : Union[str, Any] = np.dtype(lowerCAmelCase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) _lowerCamelCase : str = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) ) return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def _snake_case ( lowercase__ ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) if objs: _lowerCamelCase : Any = first_non_null_value(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowerCAmelCase_ , np.ndarray ): _lowerCamelCase : List[Any] = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] elif isinstance(lowerCAmelCase_ , PIL.Image.Image ): _lowerCamelCase : str = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] else: return objs else: return objs
368
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase ): _lowerCamelCase : Dict = question_encoder _lowerCamelCase : List[Any] = generator _lowerCamelCase : Optional[Any] = self.question_encoder def A_ ( self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' ) _lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase ) if config is None: _lowerCamelCase : int = RagConfig.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__( self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.question_encoder def A_ ( self ): _lowerCamelCase : Optional[Any] = 
self.generator def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , lowercase , ) if max_length is None: _lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length _lowerCamelCase : Optional[Any] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _lowerCamelCase : int = self.current_tokenizer.model_max_length _lowerCamelCase : str = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) _lowerCamelCase : int = labels['input_ids'] return model_inputs
12
0
"""simple docstring""" def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = [] _lowerCamelCase : int = set({'(', '[', '{'} ) _lowerCamelCase : Optional[Any] = set({')', ']', '}'} ) _lowerCamelCase : Optional[int] = {"{": "}", "[": "]", "(": ")"} for i in range(len(__SCREAMING_SNAKE_CASE ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(__SCREAMING_SNAKE_CASE ) == 0 or (len(__SCREAMING_SNAKE_CASE ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(__SCREAMING_SNAKE_CASE ) == 0 def _snake_case ( ): _lowerCamelCase : str = input('Enter sequence of brackets: ' ) if is_balanced(__SCREAMING_SNAKE_CASE ): print(__SCREAMING_SNAKE_CASE , 'is balanced' ) else: print(__SCREAMING_SNAKE_CASE , 'is not balanced' ) if __name__ == "__main__": main()
369
"""simple docstring""" def _snake_case ( lowercase__ = 10 ): if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError('Invalid input' ) _lowerCamelCase : str = 10**n _lowerCamelCase : Union[str, Any] = 28433 * (pow(2 , 7830457 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
12
0
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowercase__ = logging.getLogger(__name__) def _snake_case ( ): _lowerCamelCase : Optional[Any] = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=__lowerCAmelCase , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=__lowerCAmelCase , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=__lowerCAmelCase , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=__lowerCAmelCase , default=1000 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=__lowerCAmelCase , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=__lowerCAmelCase , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=__lowerCAmelCase , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' 
, ) _lowerCamelCase : List[Any] = parser.parse_args() return args def _snake_case ( lowercase__ ): def fn(lowercase__ ): return tokenizer(examples['text'] ) return fn def _snake_case ( lowercase__ ): _lowerCamelCase : Any = [] for i in range(len(tokenized_data['input_ids'] ) ): _lowerCamelCase : Optional[int] = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } _lowerCamelCase : List[str] = tf.train.Features(feature=__lowerCAmelCase ) _lowerCamelCase : Dict = tf.train.Example(features=__lowerCAmelCase ) _lowerCamelCase : List[str] = example.SerializeToString() records.append(__lowerCAmelCase ) return records def _snake_case ( lowercase__ ): _lowerCamelCase : Any = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _lowerCamelCase : Any = min(len(__lowerCAmelCase ) , args.limit ) _lowerCamelCase : Optional[int] = dataset.select(range(__lowerCAmelCase ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _lowerCamelCase : Union[str, Any] = os.path.join(args.output_dir , args.split ) if not os.path.exists(__lowerCAmelCase ): os.makedirs(__lowerCAmelCase ) else: _lowerCamelCase : List[str] = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. 
_lowerCamelCase : Optional[int] = tokenize_function(__lowerCAmelCase ) _lowerCamelCase : List[Any] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowercase__ ): # Concatenate all texts. _lowerCamelCase : Union[str, Any] = {k: sum(examples[k] , [] ) for k in examples.keys()} _lowerCamelCase : Dict = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _lowerCamelCase : Tuple = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. 
_lowerCamelCase : List[str] = { k: [t[i : i + args.max_length] for i in range(0 , __lowerCAmelCase , args.max_length )] for k, t in concatenated_examples.items() } return result _lowerCamelCase : Optional[int] = dataset_tokenized.map(__lowerCAmelCase , batched=__lowerCAmelCase , batch_size=1000 , num_proc=4 ) _lowerCamelCase : Tuple = 0 _lowerCamelCase : str = 0 for shard in range(0 , len(__lowerCAmelCase ) , args.shard_size ): _lowerCamelCase : Tuple = grouped_dataset[shard : shard + args.shard_size] _lowerCamelCase : Union[str, Any] = len(dataset_snapshot['input_ids'] ) _lowerCamelCase : Optional[Any] = os.path.join(__lowerCAmelCase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _lowerCamelCase : Dict = get_serialized_examples(__lowerCAmelCase ) with tf.io.TFRecordWriter(__lowerCAmelCase ) as out_file: for i in range(len(__lowerCAmelCase ) ): _lowerCamelCase : Tuple = serialized_examples[i] out_file.write(__lowerCAmelCase ) print('Wrote file {} containing {} records'.format(__lowerCAmelCase , __lowerCAmelCase ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , 'w' ) as f: print(f'''Total {args.split} records: {total_records}''' , file=__lowerCAmelCase ) if __name__ == "__main__": lowercase__ = parse_args() main(args)
370
"""simple docstring""" import argparse import datetime def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } _lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month _lowerCamelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) _lowerCamelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day _lowerCamelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator _lowerCamelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year _lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation _lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) ) # Start math if m <= 2: _lowerCamelCase : str = y - 1 _lowerCamelCase : Tuple = m + 12 # maths var _lowerCamelCase : int = int(str(lowercase__ )[:2] ) _lowerCamelCase : int = int(str(lowercase__ )[2:] ) _lowerCamelCase : int = int(2.6 * m - 5.3_9 ) _lowerCamelCase : int = int(c / 4 ) _lowerCamelCase : int = int(k / 4 ) _lowerCamelCase : int = int(d + k ) _lowerCamelCase : int = int(t + u + v + x ) _lowerCamelCase : int = int(z - (2 * c) ) _lowerCamelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowercase__ = parser.parse_args() zeller(args.date_input)
12
0
def _snake_case ( lowercase__ , lowercase__ ): return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
371
"""simple docstring""" import re def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' ) if match := re.search(lowercase__ , lowercase__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
12
0
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase ): _lowerCamelCase : Dict = question_encoder _lowerCamelCase : List[Any] = generator _lowerCamelCase : Optional[Any] = self.question_encoder def A_ ( self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' ) _lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase ) if config is None: _lowerCamelCase : int = RagConfig.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__( self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.question_encoder def A_ ( self ): _lowerCamelCase : Optional[Any] = 
self.generator def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , lowercase , ) if max_length is None: _lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length _lowerCamelCase : Optional[Any] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _lowerCamelCase : int = self.current_tokenizer.model_max_length _lowerCamelCase : str = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) _lowerCamelCase : int = labels['input_ids'] return model_inputs
350
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowercase__ = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} lowercase__ = """zero2""" lowercase__ = """zero3""" lowercase__ = [ZEROa, ZEROa] def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param _lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test lowercase__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu 
@parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) def A_ ( self , lowercase ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = models[model] _lowerCamelCase : Optional[int] = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , ) self.do_checks(lowercase ) return output_dir def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase ) _lowerCamelCase : Any = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['--fp16'] ) # currently 
ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _lowerCamelCase : Dict = self.get_launcher(lowercase ) _lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase , env=self.get_env() ) return output_dir def A_ ( self , lowercase=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
12
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , *lowercase , **lowercase ): warnings.warn( 'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use MobileViTImageProcessor instead.' , lowercase , ) super().__init__(*lowercase , **lowercase )
351
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : Optional[Any] = do_rescale _lowerCamelCase : Union[str, Any] = rescale_factor _lowerCamelCase : Any = do_pad _lowerCamelCase : Optional[int] = pad_size def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = None ): _lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase ) _lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height _lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase ) def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): _lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad _lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size _lowerCamelCase : Dict = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise 
ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. _lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images] if do_rescale: _lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_pad: _lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images] _lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] _lowerCamelCase : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase )
12
0
import re


def indian_phone_validator(phone: str) -> bool:
    """Return True if *phone* is a valid Indian mobile number.

    Accepts an optional ``+91`` country code (followed by ``-`` or a space),
    an optional leading ``0`` or ``91``, then a 10-digit number whose first
    digit is 7, 8 or 9.
    """
    # Original compiled the pattern but then searched the phone string against
    # itself (`re.search(phone, phone)`), which never used the pattern and blew
    # up on regex metacharacters such as '+'. Search the compiled pattern instead.
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := pat.search(phone):
        # Anchors already enforce a full match; match.string is the whole input.
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("""+918827897895"""))
352
"""simple docstring""" import os import string import sys lowercase__ = 1 << 8 lowercase__ = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } lowercase__ = KEYMAP["""up"""] lowercase__ = KEYMAP["""left"""] if sys.platform == "win32": lowercase__ = [] lowercase__ = { B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): lowercase__ = ord(str(i)) def _snake_case ( ): if os.name == "nt": import msvcrt _lowerCamelCase : Any = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowercase__ ) == 0: # Read the keystroke _lowerCamelCase : str = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCamelCase : List[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(lowercase__ ) if ord(lowercase__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _lowerCamelCase : List[Any] = chr(KEYMAP['esc'] ) except KeyError: _lowerCamelCase : int = cha[1] else: _lowerCamelCase : Optional[int] = ch.decode(lowercase__ ) else: _lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": 
import termios import tty _lowerCamelCase : List[str] = sys.stdin.fileno() _lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ ) try: tty.setraw(lowercase__ ) _lowerCamelCase : Optional[Any] = sys.stdin.read(1 ) finally: termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ ) return ch def _snake_case ( ): _lowerCamelCase : int = get_raw_chars() if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowercase__ ) == KEYMAP["esc"]: _lowerCamelCase : Union[str, Any] = get_raw_chars() if ord(lowercase__ ) == KEYMAP["mod_int"]: _lowerCamelCase : List[Any] = get_raw_chars() if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowercase__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
12
0
"""simple docstring""" import sys from pathlib import Path lowercase__ = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} lowercase__ = """zero2""" lowercase__ = """zero3""" lowercase__ = [ZEROa, ZEROa] def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): """simple docstring""" _lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test lowercase__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @parameterized.expand(lowercase , name_func=lowercase ) 
def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) def A_ ( self , lowercase ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = models[model] _lowerCamelCase : Optional[int] = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , ) self.do_checks(lowercase ) return output_dir def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase ) _lowerCamelCase : Any = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['--fp16'] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _lowerCamelCase : 
Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _lowerCamelCase : Dict = self.get_launcher(lowercase ) _lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase , env=self.get_env() ) return output_dir def A_ ( self , lowercase=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
353
"""simple docstring""" from typing import Any def _snake_case ( lowercase__ ): if not input_list: return [] _lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list] _lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def _snake_case ( lowercase__ ): if is_torch_version('<' , '2.0.0' ) or not hasattr(lowercase__ , '_dynamo' ): return False return isinstance(lowercase__ , torch._dynamo.eval_frame.OptimizedModule ) def _snake_case ( lowercase__ , lowercase__ = True ): _lowerCamelCase : Optional[int] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) _lowerCamelCase : Optional[int] = is_compiled_module(lowercase__ ) if is_compiled: _lowerCamelCase : Optional[int] = model _lowerCamelCase : int = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(lowercase__ , lowercase__ ): _lowerCamelCase : Union[str, Any] = model.module if not keep_fpaa_wrapper: _lowerCamelCase : Any = getattr(lowercase__ , 'forward' ) _lowerCamelCase : Tuple = model.__dict__.pop('_original_forward' , lowercase__ ) if original_forward is not None: while hasattr(lowercase__ , '__wrapped__' ): _lowerCamelCase : List[Any] = forward.__wrapped__ if forward == original_forward: break _lowerCamelCase : Optional[Any] = forward if getattr(lowercase__ , '_converted_to_transformer_engine' , lowercase__ ): convert_model(lowercase__ , to_transformer_engine=lowercase__ ) if is_compiled: _lowerCamelCase : List[Any] = model _lowerCamelCase : str = compiled_model return model def _snake_case ( ): PartialState().wait_for_everyone() def _snake_case ( lowercase__ , lowercase__ ): if PartialState().distributed_type == DistributedType.TPU: 
xm.save(lowercase__ , lowercase__ ) elif PartialState().local_process_index == 0: torch.save(lowercase__ , lowercase__ ) @contextmanager def _snake_case ( **lowercase__ ): for key, value in kwargs.items(): _lowerCamelCase : List[str] = str(lowercase__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def _snake_case ( lowercase__ ): if not hasattr(lowercase__ , '__qualname__' ) and not hasattr(lowercase__ , '__name__' ): _lowerCamelCase : List[Any] = getattr(lowercase__ , '__class__' , lowercase__ ) if hasattr(lowercase__ , '__qualname__' ): return obj.__qualname__ if hasattr(lowercase__ , '__name__' ): return obj.__name__ return str(lowercase__ ) def _snake_case ( lowercase__ , lowercase__ ): for key, value in source.items(): if isinstance(lowercase__ , lowercase__ ): _lowerCamelCase : List[Any] = destination.setdefault(lowercase__ , {} ) merge_dicts(lowercase__ , lowercase__ ) else: _lowerCamelCase : Union[str, Any] = value return destination def _snake_case ( lowercase__ = None ): if port is None: _lowerCamelCase : List[str] = 29500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
354
"""simple docstring""" def _snake_case ( lowercase__ ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection _lowerCamelCase : List[str] = len(lowercase__ ) _lowerCamelCase : List[str] = max(lowercase__ ) _lowerCamelCase : List[str] = min(lowercase__ ) # create the counting array _lowerCamelCase : List[Any] = coll_max + 1 - coll_min _lowerCamelCase : List[Any] = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowercase__ ): _lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1] # create the output collection _lowerCamelCase : Dict = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowercase__ ) ): _lowerCamelCase : Any = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _snake_case ( lowercase__ ): return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt" lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(counting_sort(unsorted))
12
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = True , lowercase = 1 / 255 , lowercase = None , lowercase = True , lowercase = None , lowercase = None , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : Any = size if size is not None else {'height': 224, 'width': 224} _lowerCamelCase : List[str] = get_size_dict(lowercase ) _lowerCamelCase : int = crop_size if crop_size is not None else {'height': 224, 'width': 224} _lowerCamelCase : List[Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name='crop_size' ) _lowerCamelCase : List[Any] = do_resize _lowerCamelCase : str = do_rescale _lowerCamelCase : Optional[Any] = do_normalize _lowerCamelCase : Optional[int] = do_center_crop _lowerCamelCase : Union[str, Any] = crop_size _lowerCamelCase : Any = size _lowerCamelCase : Union[str, Any] = resample _lowerCamelCase : List[str] = rescale_factor _lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCamelCase : Tuple = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BILINEAR , lowercase = None , **lowercase , ): _lowerCamelCase : Optional[Any] = get_size_dict(lowercase ) if 
"shortest_edge" in size: _lowerCamelCase : List[str] = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: _lowerCamelCase : Optional[Any] = (size['height'], size['width']) else: raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' ) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ): _lowerCamelCase : int = get_size_dict(lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' ) return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ): return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): _lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize _lowerCamelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize _lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCamelCase : List[str] = 
crop_size if crop_size is not None else self.crop_size _lowerCamelCase : Tuple = get_size_dict(lowercase , param_name='crop_size' , default_to_square=lowercase ) _lowerCamelCase : Any = resample if resample is not None else self.resample _lowerCamelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean _lowerCamelCase : Dict = image_std if image_std is not None else self.image_std _lowerCamelCase : Dict = size if size is not None else self.size _lowerCamelCase : List[str] = get_size_dict(lowercase ) if not is_batched(lowercase ): _lowerCamelCase : List[Any] = [images] if not valid_images(lowercase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. 
_lowerCamelCase : Union[str, Any] = [to_numpy_array(lowercase ) for image in images] if do_resize: _lowerCamelCase : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images] if do_center_crop: _lowerCamelCase : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images] if do_rescale: _lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_normalize: _lowerCamelCase : List[Any] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images] _lowerCamelCase : List[str] = [to_channel_dimension_format(lowercase , lowercase ) for image in images] _lowerCamelCase : List[Any] = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase )
355
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. 
EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) lowercase__ = parser.parse_args() lowercase__ = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
12
0
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def _snake_case ( lowercase__ ): def wrapper(*lowercase__ , **lowercase__ ): _lowerCamelCase : Tuple = timeit.default_timer() _lowerCamelCase : Tuple = func(*lowercase__ , **lowercase__ ) _lowerCamelCase : str = timeit.default_timer() - starttime return delta _lowerCamelCase : List[Any] = func.__name__ return wrapper def _snake_case ( lowercase__ , lowercase__=100 , lowercase__=None ): _lowerCamelCase : Dict = [] _lowerCamelCase : List[str] = seq_shapes or {} for i in range(lowercase__ ): _lowerCamelCase : Optional[int] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(lowercase__ , _ArrayXD ): _lowerCamelCase : List[str] = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(lowercase__ , datasets.Value ): if v.dtype == "string": _lowerCamelCase : Any = 'The small grey turtle was surprisingly fast when challenged.' 
else: _lowerCamelCase : List[str] = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(lowercase__ , datasets.Sequence ): while isinstance(lowercase__ , datasets.Sequence ): _lowerCamelCase : Tuple = v.feature _lowerCamelCase : List[Any] = seq_shapes[k] _lowerCamelCase : int = np.random.rand(*lowercase__ ).astype(v.dtype ) _lowerCamelCase : List[str] = data dummy_data.append((i, example) ) return dummy_data def _snake_case ( lowercase__ , lowercase__ , lowercase__=100 , lowercase__=None ): _lowerCamelCase : List[str] = generate_examples(lowercase__ , num_examples=lowercase__ , seq_shapes=lowercase__ ) with ArrowWriter(features=lowercase__ , path=lowercase__ ) as writer: for key, record in dummy_data: _lowerCamelCase : str = features.encode_example(lowercase__ ) writer.write(lowercase__ ) _lowerCamelCase : Optional[Any] = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' ) _lowerCamelCase : Optional[int] = datasets.Dataset.from_file(filename=lowercase__ , info=datasets.DatasetInfo(features=lowercase__ ) ) return dataset
356
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = (UnCLIPScheduler,) def A_ ( self , **lowercase ): _lowerCamelCase : Any = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**lowercase ) return config def A_ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def A_ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=lowercase ) def A_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase ) def A_ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=lowercase ) def A_ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=lowercase ) def A_ ( self ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=lowercase , prev_timestep=lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' ) _lowerCamelCase : str = scheduler_class(**lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5 def A_ ( self ): _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' ) _lowerCamelCase : int = scheduler_class(**lowercase ) _lowerCamelCase : List[str] = 0.5 assert 
scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5 def A_ ( self ): _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config() _lowerCamelCase : Tuple = scheduler_class(**lowercase ) _lowerCamelCase : Union[str, Any] = scheduler.timesteps _lowerCamelCase : Any = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter _lowerCamelCase : Optional[int] = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : Tuple = model(lowercase , lowercase ) # 2. predict previous mean of sample x_t-1 _lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample _lowerCamelCase : Optional[int] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2 assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3 def A_ ( self ): _lowerCamelCase : Tuple = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Optional[Any] = scheduler_class(**lowercase ) scheduler.set_timesteps(25 ) _lowerCamelCase : Optional[Any] = scheduler.timesteps _lowerCamelCase : Optional[int] = self.dummy_model() _lowerCamelCase : Any = self.dummy_sample_deter _lowerCamelCase : str = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : List[Any] = model(lowercase , lowercase ) if i + 1 == timesteps.shape[0]: _lowerCamelCase : Optional[int] = None else: _lowerCamelCase : List[str] = timesteps[i + 1] # 2. 
predict previous mean of sample x_t-1 _lowerCamelCase : Union[str, Any] = scheduler.step( lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample _lowerCamelCase : List[Any] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2 assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3 def A_ ( self ): pass def A_ ( self ): pass
12
0
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : List[str] = WavaVecaForSequenceClassification.from_pretrained(lowercase__ , config=lowercase__ ) _lowerCamelCase : Tuple = downstream_dict['projector.weight'] _lowerCamelCase : List[str] = downstream_dict['projector.bias'] _lowerCamelCase : Dict = downstream_dict['model.post_net.linear.weight'] _lowerCamelCase : int = downstream_dict['model.post_net.linear.bias'] return model def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Union[str, Any] = WavaVecaForAudioFrameClassification.from_pretrained(lowercase__ , config=lowercase__ ) _lowerCamelCase : Any = downstream_dict['model.linear.weight'] _lowerCamelCase : str = downstream_dict['model.linear.bias'] return model def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Dict = WavaVecaForXVector.from_pretrained(lowercase__ , config=lowercase__ ) _lowerCamelCase : str = downstream_dict['connector.weight'] _lowerCamelCase : Dict = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _lowerCamelCase : Optional[Any] = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] _lowerCamelCase : Dict = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] _lowerCamelCase : Optional[int] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] _lowerCamelCase : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] _lowerCamelCase : str = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] _lowerCamelCase : Union[str, Any] = 
downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] _lowerCamelCase : Tuple = downstream_dict['objective.W'] return model @torch.no_grad() def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Optional[Any] = torch.load(lowercase__ , map_location='cpu' ) _lowerCamelCase : List[Any] = checkpoint['Downstream'] _lowerCamelCase : List[str] = WavaVecaConfig.from_pretrained(lowercase__ ) _lowerCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained( lowercase__ , return_attention_mask=lowercase__ , do_normalize=lowercase__ ) _lowerCamelCase : Union[str, Any] = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): _lowerCamelCase : Optional[int] = convert_classification(lowercase__ , lowercase__ , lowercase__ ) elif arch.endswith('ForAudioFrameClassification' ): _lowerCamelCase : str = convert_diarization(lowercase__ , lowercase__ , lowercase__ ) elif arch.endswith('ForXVector' ): _lowerCamelCase : List[Any] = convert_xvector(lowercase__ , lowercase__ , lowercase__ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: _lowerCamelCase : Dict = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(lowercase__ ) hf_model.save_pretrained(lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument( """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model.""" ) parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""") parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""") lowercase__ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, 
args.checkpoint_path, args.model_dump_path)
357
"""simple docstring""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """data2vec-audio""" def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) _lowerCamelCase : str = hidden_size _lowerCamelCase : str = feat_extract_activation _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Optional[Any] = conv_bias _lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings _lowerCamelCase : List[Any] = num_conv_pos_embedding_groups _lowerCamelCase : List[Any] = conv_pos_kernel_size _lowerCamelCase : Optional[int] = len(self.conv_dim ) _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase 
: Any = intermediate_size _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Any = hidden_dropout _lowerCamelCase : Union[str, Any] = attention_dropout _lowerCamelCase : str = activation_dropout _lowerCamelCase : Any = feat_proj_dropout _lowerCamelCase : Tuple = final_dropout _lowerCamelCase : Union[str, Any] = layerdrop _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Optional[Any] = mask_time_prob _lowerCamelCase : List[Any] = mask_time_length _lowerCamelCase : List[Any] = mask_time_min_masks _lowerCamelCase : Tuple = mask_feature_prob _lowerCamelCase : Optional[Any] = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # ctc loss _lowerCamelCase : Tuple = ctc_loss_reduction _lowerCamelCase : str = ctc_zero_infinity # adapter _lowerCamelCase : Union[str, Any] = add_adapter _lowerCamelCase : List[Any] = adapter_kernel_size _lowerCamelCase : Optional[Any] = adapter_stride _lowerCamelCase : List[Any] = num_adapter_layers _lowerCamelCase : int = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. 
_lowerCamelCase : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCamelCase : List[str] = list(lowercase ) _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Any = list(lowercase ) _lowerCamelCase : Optional[Any] = xvector_output_dim @property def A_ ( self ): return math.prod(self.conv_stride )
12
0
"""simple docstring""" import math import os import sys def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = '' try: with open(lowercase__ , 'rb' ) as binary_file: _lowerCamelCase : Union[str, Any] = binary_file.read() for dat in data: _lowerCamelCase : List[str] = f'''{dat:08b}''' result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lexicon.pop(lowercase__ ) _lowerCamelCase : Dict = last_match_id if math.loga(lowercase__ ).is_integer(): for curr_key in lexicon: _lowerCamelCase : int = '0' + lexicon[curr_key] _lowerCamelCase : Any = bin(lowercase__ )[2:] def _snake_case ( lowercase__ ): _lowerCamelCase : List[str] = {'0': '0', '1': '1'} _lowerCamelCase : List[Any] = '', '' _lowerCamelCase : Dict = len(lowercase__ ) for i in range(len(lowercase__ ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue _lowerCamelCase : Optional[int] = lexicon[curr_string] result += last_match_id add_key_to_lexicon(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) index += 1 _lowerCamelCase : Union[str, Any] = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": _lowerCamelCase : Optional[Any] = lexicon[curr_string] result += last_match_id return result def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : str = os.path.getsize(lowercase__ ) _lowerCamelCase : Any = bin(lowercase__ )[2:] _lowerCamelCase : Union[str, Any] = len(lowercase__ ) return "0" * (length_length - 1) + file_length_binary + compressed def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Optional[int] = 8 try: with open(lowercase__ , 'wb' ) as opened_file: _lowerCamelCase : List[str] = [ to_write[i : i + byte_length] for i in range(0 , len(lowercase__ ) , lowercase__ ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( 
byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(lowercase__ , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Any = read_file_binary(lowercase__ ) _lowerCamelCase : Dict = compress_data(lowercase__ ) _lowerCamelCase : List[str] = add_file_length(lowercase__ , lowercase__ ) write_file_binary(lowercase__ , lowercase__ ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
358
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool lowercase__ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", 
"""Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": 
"""kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": 
"""slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """facebook/nllb-200-distilled-600M""" lowerCamelCase__ = ( """This is a tool that translates text from a language to another. 
It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`.""" ) lowerCamelCase__ = """translator""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = LANGUAGE_CODES lowerCamelCase__ = ["""text""", """text""", """text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase , lowercase , lowercase ): if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) _lowerCamelCase : str = self.lang_to_code[src_lang] _lowerCamelCase : int = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase ) def A_ ( self , lowercase ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase )
12
0
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


# NOTE(review): module-level names were mechanically rewritten to
# ``lowercase__`` while later references still use the original names
# (``git_repo_path``, ``REFERENCE_CODE``); several methods also repeat the
# parameter name ``lowercase`` (a SyntaxError in Python).  Confirm against
# the upstream ``check_copies`` test module before relying on this file.
lowercase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowercase__ = """
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests for the `# Copied from ...` consistency checker and README
    localization helpers in `utils/check_copies.py`."""

    def A_ ( self ):
        # setUp: work inside a temporary copy of the repo layout so the
        # consistency checker can rewrite files safely.
        _lowerCamelCase : str = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
        _lowerCamelCase : Dict = self.transformer_dir
        shutil.copy(
            os.path.join(lowercase , 'src/transformers/models/bert/modeling_bert.py' ) ,
            os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) ,
        )

    def A_ ( self ):
        # tearDown: restore the default path and delete the scratch tree.
        _lowerCamelCase : int = 'src/transformers'
        shutil.rmtree(self.transformer_dir )

    def A_ ( self , lowercase , lowercase , lowercase , lowercase=None ):
        # Helper: write `class_code` under `comment` into a temp file, run the
        # consistency checker, and optionally verify the overwritten result.
        _lowerCamelCase : Optional[Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
        if overwrite_result is not None:
            _lowerCamelCase : Union[str, Any] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
        # Format with black the same way the checker itself does.
        _lowerCamelCase : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        _lowerCamelCase : Tuple = black.format_str(lowercase , mode=lowercase )
        _lowerCamelCase : Any = os.path.join(self.transformer_dir , 'new_code.py' )
        with open(lowercase , 'w' , newline='\n' ) as f:
            f.write(lowercase )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(lowercase ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=lowercase )
            with open(lowercase , 'r' ) as f:
                self.assertTrue(f.read() , lowercase )

    def A_ ( self ):
        # The reference implementation should be found verbatim in the repo.
        _lowerCamelCase : Tuple = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
        self.assertEqual(lowercase , lowercase )

    def A_ ( self ):
        # Base copy consistency
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' ,
            'BertLMPredictionHead' ,
            REFERENCE_CODE + '\n' ,
        )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' ,
            'BertLMPredictionHead' ,
            lowercase ,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' ,
            'TestModelLMPredictionHead' ,
            re.sub('Bert' , 'TestModel' , lowercase ) ,
        )
        # Copy consistency with a really long name
        _lowerCamelCase : int = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' ,
            F'''{long_class_name}LMPredictionHead''' ,
            re.sub('Bert' , lowercase , lowercase ) ,
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' ,
            'TestModelLMPredictionHead' ,
            lowercase ,
            overwrite_result=re.sub('Bert' , 'TestModel' , lowercase ) ,
        )

    def A_ ( self ):
        # README localization: converting the English model list into the
        # zh-hans README format must keep entry counts in sync.
        _lowerCamelCase : List[Any] = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        _lowerCamelCase : List[str] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
            ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
            ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
            ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
            ' Luong, Quoc V. Le, Christopher D. Manning.'
        )
        _lowerCamelCase : Optional[int] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        _lowerCamelCase : Any = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
            ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
            ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
            ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
            ' Christopher D. Manning 发布。\n'
        )
        _lowerCamelCase : Any = check_copies.convert_to_localized_md(
            lowercase , lowercase , localized_readme['format_model_list'] )
        self.assertFalse(lowercase )
        self.assertEqual(lowercase , lowercase )
        _lowerCamelCase : List[Any] = check_copies.convert_to_localized_md(
            lowercase , lowercase , localized_readme['format_model_list'] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(lowercase )
        _lowerCamelCase : List[Any] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        )
        _lowerCamelCase : Dict = (
            '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
            ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        _lowerCamelCase : Optional[int] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        _lowerCamelCase : Tuple = check_copies.convert_to_localized_md(
            lowercase , lowercase , localized_readme['format_model_list'] )
        # Check if the model link is synchronized.
        self.assertEqual(lowercase , lowercase )
359
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 ) _lowerCamelCase : List[str] = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def A_ ( self , lowercase , lowercase ): for example in examples: _lowerCamelCase : Tuple = video_classifier(lowercase ) self.assertEqual( lowercase , [ {'score': ANY(lowercase ), 'label': ANY(lowercase )}, {'score': ANY(lowercase ), 'label': ANY(lowercase )}, ] , ) @require_torch def A_ ( self ): _lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' _lowerCamelCase : Tuple = VideoMAEFeatureExtractor( size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} ) _lowerCamelCase : Dict = pipeline( 'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 ) _lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , 
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , ) _lowerCamelCase : str = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], ] , ) @require_tf def A_ ( self ): pass
12
0
"""simple docstring""" import requests def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : Any = {'Content-Type': 'application/json'} _lowerCamelCase : List[Any] = requests.post(lowercase__ , json={'text': message_body} , headers=lowercase__ ) if response.status_code != 200: _lowerCamelCase : Optional[Any] = ( 'Request to slack returned an error ' f'''{response.status_code}, the response is:\n{response.text}''' ) raise ValueError(lowercase__ ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
360
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase__ = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
12
0
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """gpt_neo""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , lowercase=50257 , lowercase=2048 , lowercase=2048 , lowercase=24 , lowercase=[[["global", "local"], 12]] , lowercase=16 , lowercase=None , lowercase=256 , lowercase="gelu_new" , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=1E-5 , lowercase=0.02 , lowercase=True , lowercase=50256 , lowercase=50256 , **lowercase , ): _lowerCamelCase : Optional[Any] = vocab_size _lowerCamelCase : Union[str, Any] = max_position_embeddings _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : Dict = num_layers _lowerCamelCase : Optional[int] = num_heads _lowerCamelCase : Tuple = intermediate_size _lowerCamelCase : List[Any] = window_size _lowerCamelCase : Optional[Any] = activation_function _lowerCamelCase : List[Any] = resid_dropout _lowerCamelCase : Any = embed_dropout _lowerCamelCase : str = attention_dropout _lowerCamelCase : int = classifier_dropout _lowerCamelCase : Any = layer_norm_epsilon _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : int = use_cache _lowerCamelCase : Dict = bos_token_id _lowerCamelCase : Dict = eos_token_id _lowerCamelCase : Tuple = attention_types _lowerCamelCase : Optional[int] = self.expand_attention_types_params(lowercase 
) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' ) super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) @staticmethod def A_ ( lowercase ): _lowerCamelCase : Optional[Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): import torch _lowerCamelCase : List[Any] = input.size() _lowerCamelCase : Dict = len(lowercase__ ) _lowerCamelCase : Optional[Any] = shape[dimension] _lowerCamelCase : int = torch.arange(0 , lowercase__ , lowercase__ ) _lowerCamelCase : Dict = torch.div(sizedim - size , lowercase__ , rounding_mode='floor' ) + 1 _lowerCamelCase : Dict = torch.arange(lowercase__ ) + low_indices[:min_length][:, None] _lowerCamelCase : Dict = [slice(lowercase__ )] * rank _lowerCamelCase : str = indices _lowerCamelCase : Optional[Any] = input[s] _lowerCamelCase : str = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(lowercase__ ) def _snake_case ( lowercase__ , lowercase__ ): import torch _lowerCamelCase : Optional[Any] = torch.arange(1 , lowercase__ ) _lowerCamelCase : List[str] = torch.remainder(lowercase__ , lowercase__ ) _lowerCamelCase : Optional[Any] = remainders == 0 _lowerCamelCase : Union[str, Any] = candidates[divisor_indices] _lowerCamelCase : List[str] = torch.max(lowercase__ ) return largest_divisor, torch.div(lowercase__ , lowercase__ , rounding_mode='floor' ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @property def A_ ( self ): 
_lowerCamelCase : List[str] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction='inputs' ) _lowerCamelCase : Union[str, Any] = {0: 'batch', 1: 'past_sequence + sequence'} else: _lowerCamelCase : Dict = {0: 'batch', 1: 'sequence'} return common_inputs @property def A_ ( self ): return self._config.num_heads def A_ ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ): _lowerCamelCase : Dict = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() _lowerCamelCase : Optional[int] = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowerCamelCase : int = common_inputs['input_ids'].shape # Not using the same length for past_key_values _lowerCamelCase : Any = seqlen + 2 _lowerCamelCase : int = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCamelCase : int = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] _lowerCamelCase : int = common_inputs['attention_mask'] if self.use_past: _lowerCamelCase : int = ordered_inputs['attention_mask'].dtype _lowerCamelCase : int = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def A_ ( self ): return 13
361
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): if attention_mask is None: _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = OPTConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : Tuple = pad_token_id _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : 
Optional[int] = embed_dim _lowerCamelCase : List[str] = word_embed_proj_dim _lowerCamelCase : Any = False def A_ ( self ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) _lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase ) _lowerCamelCase : Optional[Any] = inputs_dict['input_ids'] _lowerCamelCase : str = input_ids[:1, :] _lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) _lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : Optional[int] = tf.concat([attention_mask, 
next_attn_mask] , axis=-1 ) _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 10 def A_ ( self ): _lowerCamelCase : int = TFOPTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _lowerCamelCase : Optional[int] = model_class(config=lowercase ) _lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) _lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. _lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing _lowerCamelCase : int = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Optional[Any] = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) _lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Union[str, Any] = False self.assertTrue(lowercase ) def _snake_case ( lowercase__ ): return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = 99 def A_ ( self ): _lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _lowerCamelCase 
: int = input_ids.shape[0] _lowerCamelCase : List[Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' ) _lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): _lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state _lowerCamelCase : Optional[Any] = (1, 11, 512) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[str] = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) _lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): super().setUp() _lowerCamelCase : List[Any] = 'facebook/opt-350m' def A_ ( self ): _lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) _lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) _lowerCamelCase : List[str] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _lowerCamelCase : List[Any] = tokenizer(lowercase , 
return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) _lowerCamelCase : Any = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) _lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self ): _lowerCamelCase : str = 'facebook/opt-125m' _lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : int = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string 
self.assertListEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = 'facebook/opt-350m' _lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase ) _lowerCamelCase : Any = 'left' # use different length sentences to test batching _lowerCamelCase : Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] _lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) _lowerCamelCase : int = inputs['input_ids'] _lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase ) _lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) _lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) _lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def A_ ( self ): _lowerCamelCase : Tuple = 'facebook/opt-350m' _lowerCamelCase : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and 
mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
12
0
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = (UnCLIPScheduler,) def A_ ( self , **lowercase ): _lowerCamelCase : Any = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**lowercase ) return config def A_ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def A_ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=lowercase ) def A_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase ) def A_ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=lowercase ) def A_ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=lowercase ) def A_ ( self ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=lowercase , prev_timestep=lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' ) _lowerCamelCase : str = scheduler_class(**lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5 def A_ ( self ): _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' ) _lowerCamelCase : int = scheduler_class(**lowercase ) _lowerCamelCase : List[str] = 0.5 assert 
scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5 def A_ ( self ): _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config() _lowerCamelCase : Tuple = scheduler_class(**lowercase ) _lowerCamelCase : Union[str, Any] = scheduler.timesteps _lowerCamelCase : Any = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter _lowerCamelCase : Optional[int] = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : Tuple = model(lowercase , lowercase ) # 2. predict previous mean of sample x_t-1 _lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample _lowerCamelCase : Optional[int] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 252.2682495 ) < 1E-2 assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3 def A_ ( self ): _lowerCamelCase : Tuple = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Optional[Any] = scheduler_class(**lowercase ) scheduler.set_timesteps(25 ) _lowerCamelCase : Optional[Any] = scheduler.timesteps _lowerCamelCase : Optional[int] = self.dummy_model() _lowerCamelCase : Any = self.dummy_sample_deter _lowerCamelCase : str = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : List[Any] = model(lowercase , lowercase ) if i + 1 == timesteps.shape[0]: _lowerCamelCase : Optional[int] = None else: _lowerCamelCase : List[str] = timesteps[i + 1] # 2. 
predict previous mean of sample x_t-1 _lowerCamelCase : Union[str, Any] = scheduler.step( lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample _lowerCamelCase : List[Any] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 258.2044983 ) < 1E-2 assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3 def A_ ( self ): pass def A_ ( self ): pass
362
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """philschmid/bart-large-cnn-samsum""" lowerCamelCase__ = ( """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """ """and returns a summary of the text.""" ) lowerCamelCase__ = """summarizer""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = ["""text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase ): return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase )[0] def A_ ( self , lowercase ): return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
12
0
"""simple docstring""" from importlib import import_module from .logging import get_logger lowercase__ = get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase=None ): _lowerCamelCase : Any = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self , lowercase , getattr(lowercase , lowercase ) ) _lowerCamelCase : Tuple = module._original_module if isinstance(lowercase , _PatchedModuleObj ) else module class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = [] def __init__( self , lowercase , lowercase , lowercase , lowercase=None ): _lowerCamelCase : int = obj _lowerCamelCase : Dict = target _lowerCamelCase : Optional[Any] = new _lowerCamelCase : Dict = target.split('.' )[0] _lowerCamelCase : Union[str, Any] = {} _lowerCamelCase : Optional[int] = attrs or [] def __enter__( self ): _lowerCamelCase : Dict = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(lowercase ) ): try: _lowerCamelCase : Union[str, Any] = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _lowerCamelCase : Union[str, Any] = getattr(self.obj , lowercase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". 
if obj_attr is submodule or ( (isinstance(lowercase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): _lowerCamelCase : List[str] = obj_attr # patch at top level setattr(self.obj , lowercase , _PatchedModuleObj(lowercase , attrs=self.attrs ) ) _lowerCamelCase : List[Any] = getattr(self.obj , lowercase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(lowercase , lowercase , _PatchedModuleObj(getattr(lowercase , lowercase , lowercase ) , attrs=self.attrs ) ) _lowerCamelCase : List[str] = getattr(lowercase , lowercase ) # finally set the target attribute setattr(lowercase , lowercase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _lowerCamelCase : Any = getattr(import_module('.'.join(lowercase ) ) , lowercase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , lowercase ) is attr_value: _lowerCamelCase : Any = getattr(self.obj , lowercase ) setattr(self.obj , lowercase , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _lowerCamelCase : Any = globals()['__builtins__'][target_attr] setattr(self.obj , lowercase , self.new ) else: raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self , *lowercase ): for attr in list(self.original ): setattr(self.obj , lowercase , self.original.pop(lowercase ) ) def A_ ( self ): self.__enter__() self._active_patches.append(self ) def A_ ( self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
363
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) ) _lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : int = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _snake_case ( lowercase__ ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self , lowercase , lowercase ): super().__init__() _lowerCamelCase : Tuple = module _lowerCamelCase : int = nn.Sequential( nn.Linear(module.in_features , lowercase , bias=lowercase ) , nn.Linear(lowercase , module.out_features , bias=lowercase ) , ) _lowerCamelCase : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowercase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def A_ ( self , lowercase , *lowercase , **lowercase ): return self.module(lowercase , *lowercase , **lowercase ) + self.adapter(lowercase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = """bigscience/bloom-1b7""" # Constant values lowerCamelCase__ = 2.109659552692574 lowerCamelCase__ = """Hello my name is""" lowerCamelCase__ = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. 
I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCamelCase__ = 10 def A_ ( self ): # Models and tokenizer _lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(self.model_name ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): super().setUp() # Models and tokenizer _lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) _lowerCamelCase : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto' ) def A_ ( self ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def A_ ( self ): _lowerCamelCase : int = self.model_abit.config self.assertTrue(hasattr(lowercase , 'quantization_config' ) ) _lowerCamelCase : Optional[int] = config.to_dict() _lowerCamelCase : List[str] = config.to_diff_dict() _lowerCamelCase : Optional[int] = config.to_json_string() def A_ ( self ): from bitsandbytes.nn import Paramsabit _lowerCamelCase : Dict = self.model_fpaa.get_memory_footprint() _lowerCamelCase : Dict = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) _lowerCamelCase : str = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def A_ ( self ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowercase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) _lowerCamelCase : 
Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowercase ) , self.EXPECTED_OUTPUTS ) def A_ ( self ): _lowerCamelCase : Dict = BitsAndBytesConfig() _lowerCamelCase : Optional[Any] = True _lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowercase , device_map='auto' ) _lowerCamelCase : int = self.tokenizer(self.input_text , return_tensors='pt' ) _lowerCamelCase : Union[str, Any] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowercase ) , self.EXPECTED_OUTPUTS ) def A_ ( self ): with self.assertRaises(lowercase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowercase ) def A_ ( self ): _lowerCamelCase : Optional[int] = BitsAndBytesConfig() with self.assertRaises(lowercase ): _lowerCamelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowercase , load_in_abit=lowercase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def A_ ( self ): with self.assertRaises(lowercase ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(lowercase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowercase ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(lowercase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowercase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything _lowerCamelCase : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) _lowerCamelCase : Any = self.model_fpaa.to(torch.floataa ) _lowerCamelCase : List[Any] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , 
max_new_tokens=10 ) # Check this does not throw an error _lowerCamelCase : int = self.model_fpaa.to('cpu' ) # Check this does not throw an error _lowerCamelCase : Any = self.model_fpaa.half() # Check this does not throw an error _lowerCamelCase : Optional[int] = self.model_fpaa.float() def A_ ( self ): _lowerCamelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=lowercase , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @classmethod def A_ ( cls ): _lowerCamelCase : List[str] = 't5-small' _lowerCamelCase : Optional[int] = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(cls.model_name ) _lowerCamelCase : Optional[int] = 'Translate in German: Hello, my dog is cute' def A_ ( self ): gc.collect() torch.cuda.empty_cache() def A_ ( self ): from transformers import TaForConditionalGeneration _lowerCamelCase : List[str] = TaForConditionalGeneration._keep_in_fpaa_modules _lowerCamelCase : Dict = None # test with `t5-small` _lowerCamelCase : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto' ) _lowerCamelCase : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowerCamelCase : List[Any] = model.generate(**lowercase ) # test with `flan-t5-small` _lowerCamelCase : Union[str, Any] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowercase , device_map='auto' ) _lowerCamelCase : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowerCamelCase : str = model.generate(**lowercase ) _lowerCamelCase : Tuple = modules def A_ ( self ): import bitsandbytes as bnb from transformers import 
TaForConditionalGeneration # test with `t5-small` _lowerCamelCase : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) _lowerCamelCase : Dict = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowerCamelCase : Union[str, Any] = model.generate(**lowercase ) # test with `flan-t5-small` _lowerCamelCase : List[str] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowercase , device_map='auto' ) _lowerCamelCase : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) _lowerCamelCase : List[Any] = model.generate(**lowercase ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): super().setUp() # model_name _lowerCamelCase : Optional[int] = 'bigscience/bloom-560m' _lowerCamelCase : Tuple = 't5-small' # Different types of model _lowerCamelCase : str = AutoModel.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto' ) # Sequence classification model _lowerCamelCase : List[str] = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=lowercase , device_map='auto' ) # CausalLM model _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase , device_map='auto' ) # Seq2seq model _lowerCamelCase : str = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowercase , device_map='auto' ) def A_ ( self ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def A_ ( self ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter 
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): super().setUp() def A_ ( self ): del self.pipe gc.collect() torch.cuda.empty_cache() def A_ ( self ): _lowerCamelCase : Optional[Any] = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass _lowerCamelCase : List[str] = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): super().setUp() def A_ ( self ): _lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowercase , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model _lowerCamelCase : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch _lowerCamelCase : Optional[int] = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowercase ) , self.EXPECTED_OUTPUTS ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Optional[int] = 'facebook/opt-350m' super().setUp() def A_ ( self ): if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters _lowerCamelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowercase ) 
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): _lowerCamelCase : Optional[int] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability _lowerCamelCase : Union[str, Any] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowercase ) ): _lowerCamelCase : str = LoRALayer(module.q_proj , rank=16 ) _lowerCamelCase : List[str] = LoRALayer(module.k_proj , rank=16 ) _lowerCamelCase : Dict = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch _lowerCamelCase : Tuple = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): _lowerCamelCase : Optional[Any] = model.forward(**lowercase ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowercase , lowercase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowercase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """gpt2-xl""" lowerCamelCase__ = 3.3191854854152187
364
"""Collect pytest JSON-report logs (*.log) for the Accelerate nightly CI and
post an aggregated failure report to Slack.

NOTE(review): every assignment target in this file was mechanically rewritten
to `lowercase__` (a transcription/obfuscation artifact), so later reads of
names such as `hf_table_format`, `failed`, `group_info`, `message`, `payload`,
`line`, `test`, `duration`, ... no longer see the values the logic clearly
intends.  Restore the original names from VCS history before running; the
indentation below is likewise a best-effort reconstruction of the flattened
source — TODO confirm against the original file.
"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate

# Custom tabulate format producing Slack/markdown-friendly pipe tables.
lowercase__ = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("""""", """|""", """|"""),
    datarow=DataRow("""""", """|""", """|"""),
    padding=1,
    with_header_hide=None,
)
lowercase__ = []  # presumably `failed`: rows of [test id, duration, log prefix]
lowercase__ = []  # presumably `group_info`: [log name, num failed, failed rows]
# Slack block used when nothing failed.
lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
# Message payload, starting with the header block.
lowercase__ = [
    {
        """type""": """header""",
        """text""": {
            """type""": """plain_text""",
            """text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            """emoji""": True,
        },
    }
]
lowercase__ = 0  # presumably `total_num_failed`
# Scan each pytest log (one JSON object per line) for failed tests.
for log in Path().glob("""*.log"""):
    lowercase__ = 0  # presumably `section_num_failed`
    with open(log, """r""") as f:
        for line in f:
            lowercase__ = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                lowercase__ = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    lowercase__ = F"{line['duration']:.4f}"
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    lowercase__ = []  # reset per-log failure list
    log.unlink()  # logs are consumed once reported
lowercase__ = """"""  # presumably `message`
lowercase__ = []  # presumably `all_filesafailed`
if total_num_failed > 0:
    # Build one markdown summary per log group that had failures.
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += F"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += F"*{name[1:]}: {num_failed} failed tests*\n"
            lowercase__ = []  # presumably `failed_table`
            lowercase__ = {}  # presumably `filesafailed`: file -> [[class, test], ...]
            for test in failed_tests:
                lowercase__ = test[0].split("""::""")
                lowercase__ = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    lowercase__ = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            lowercase__ = [test[0] for test in failed_table]
            lowercase__ = list(set(files))
            # Count number of instances in failed_tests
            lowercase__ = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            lowercase__ = tabulate(
                table,
                headers=["""Test Location""", """Num Failed"""],
                tablefmt=hf_table_format,
                stralign="""right""",
            )
            message += F"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    # Slack caps section text length; truncate and point at the Action run instead.
    if len(message) > 3000:
        lowercase__ = """Too many failed tests, please see the full report in the Action results."""
        lowercase__ = len(err) + 10
        lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}"
    print(F"### {message}")
else:
    lowercase__ = """No failed tests! 🤗"""
    print(F"## {message}")
    payload.append(no_error_payload)
# Only post to Slack when running inside CI (TEST_TYPE is set).
if os.environ.get("""TEST_TYPE""", """""") != "":
    from slack_sdk import WebClient

    lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        # Main failure-summary section.
        lowercase__ = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": message,
            },
        }
        payload.append(md_report)
        # Button linking to the GitHub Actions run.
        lowercase__ = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": """*For more details:*""",
            },
            """accessory""": {
                """type""": """button""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """Check Action results""",
                    """emoji""": True,
                },
                """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        # Context block carrying the run date.
        lowercase__ = {
            """type""": """context""",
            """elements""": [
                {
                    """type""": """plain_text""",
                    """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        lowercase__ = response.data["""ts"""]
        # Post one threaded reply per failing file with its failure table.
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                lowercase__ = """"""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        lowercase__ = row[0]
                    else:
                        lowercase__ = """"""
                lowercase__ = {
                    """type""": """section""",
                    """text""": {
                        """type""": """mrkdwn""",
                        """text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="""#accelerate-ci-daily""",
                    thread_ts=ts,
                    blocks=[payload],
                )
12
0
"""Utilities to save and load model / optimizer state for PyTorch FSDP
(FullyShardedDataParallel) runs, covering the FULL, LOCAL and SHARDED
state-dict types.

Fix: the transcribed source declared every function as ``_snake_case`` with
five parameters all named ``lowercase__`` (duplicate parameter names are a
SyntaxError) and bodies referencing unbound names.  The parameter names below
are restored from the names each body actually reads (``fsdp_plugin``,
``accelerator``, ``model``, ``model_index``, ``optimizer_index``, ...); the
positional order is assumed — TODO confirm against callers.
"""
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version

# The distributed-checkpoint APIs only exist on torch >= FSDP_PYTORCH_VERSION.
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

logger = get_logger(__name__)
# Preserve the original (mangled) module-level binding for any external reference.
lowercase__ = logger


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save ``model``'s weights under ``output_dir``.

    Layout depends on ``fsdp_plugin.state_dict_type``:
    FULL -> one ``.bin`` written by the main process only;
    LOCAL -> one ``.bin`` per rank;
    SHARDED -> a ``torch.distributed.checkpoint`` directory.
    ``model_index`` disambiguates files when several models are saved.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # The full (unsharded) state dict is identical on every rank, so
            # only the main process writes it.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # Each rank saves its own local shard.
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load weights saved by :func:`save_fsdp_model` back into ``model``."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Non-main ranks only need the file when module states are not
            # broadcast from rank 0 by FSDP itself.
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object'
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            # ``input_dir`` may already point at the checkpoint directory.
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save ``optimizer``'s FSDP-consolidated state under ``output_dir``."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            # LOCAL / SHARDED: write a distributed checkpoint directory.
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load optimizer state saved by :func:`save_fsdp_optimizer` into ``optimizer``."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key='optimizer',
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state['optimizer']
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        # Re-shard the loaded (possibly consolidated) state for this rank.
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)


# In the transcribed source all four functions shared the name `_snake_case`,
# so the surviving module binding was the optimizer loader; keep it alive.
_snake_case = load_fsdp_optimizer
365
"""Processor wrapping an AutoTokenizer plus optional speaker-embedding
("voice preset") handling.

NOTE(review): parameter and assignment names in this file were mechanically
rewritten (`lowercase`, `_lowerCamelCase`), leaving several methods with
duplicate parameter names (a SyntaxError in Python) and bodies that read
names (`speaker_embeddings`, `kwargs`, `voice_preset`, ...) that are no
longer bound.  Restore the original names from VCS history before use; the
comments below describe the apparent intent only.
"""
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer

lowercase__ = logging.get_logger(__name__)


class lowerCAmelCase__(lowercase):
    # NOTE(review): the base class `lowercase` is unbound here — presumably
    # ProcessorMixin (imported above); confirm against VCS history.
    """Processor pairing a text tokenizer with optional voice-preset
    (speaker-embedding) loading, saving and validation."""

    # Names consumed by the processor machinery of the base class.
    lowerCamelCase__ = """AutoTokenizer"""
    lowerCamelCase__ = ["""tokenizer"""]
    # Expected ndarray rank for each voice-preset component.
    lowerCamelCase__ = {
        """semantic_prompt""": 1,
        """coarse_prompt""": 2,
        """fine_prompt""": 2,
    }

    def __init__(self, lowercase, lowercase=None):
        # NOTE(review): duplicate parameter name — SyntaxError; presumably
        # originally (tokenizer, speaker_embeddings=None).
        super().__init__(lowercase)
        _lowerCamelCase : Optional[int] = speaker_embeddings

    @classmethod
    def A_(cls, lowercase, lowercase="speaker_embeddings_path.json", **lowercase):
        # Appears to be `from_pretrained`: optionally fetch the speaker-embedding
        # index JSON from the repo/path, then build the wrapped tokenizer.
        if speaker_embeddings_dict_path is not None:
            _lowerCamelCase : Optional[Any] = get_file_from_repo(
                lowercase,
                lowercase,
                subfolder=kwargs.pop('subfolder', lowercase),
                cache_dir=kwargs.pop('cache_dir', lowercase),
                force_download=kwargs.pop('force_download', lowercase),
                proxies=kwargs.pop('proxies', lowercase),
                resume_download=kwargs.pop('resume_download', lowercase),
                local_files_only=kwargs.pop('local_files_only', lowercase),
                use_auth_token=kwargs.pop('use_auth_token', lowercase),
                revision=kwargs.pop('revision', lowercase),
            )
            if speaker_embeddings_path is None:
                # Missing index file: warn and continue without preloaded embeddings.
                logger.warning(
                    F'''`{os.path.join(lowercase , lowercase )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.'''
                )
                _lowerCamelCase : List[Any] = None
            else:
                with open(lowercase) as speaker_embeddings_json:
                    _lowerCamelCase : Union[str, Any] = json.load(lowercase)
        else:
            _lowerCamelCase : Tuple = None
        _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase, **lowercase)
        return cls(tokenizer=lowercase, speaker_embeddings=lowercase)

    def A_(self, lowercase, lowercase="speaker_embeddings_path.json", lowercase="speaker_embeddings", lowercase=False, **lowercase,):
        # Appears to be `save_pretrained`: dump each voice preset's arrays as
        # .npy files plus an index JSON, then delegate to the base class.
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(lowercase, lowercase, 'v2'), exist_ok=lowercase)
            _lowerCamelCase : int = {}
            _lowerCamelCase : List[Any] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    _lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase)
                    _lowerCamelCase : Any = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'], lowercase, F'''{prompt_key}_{key}'''),
                            voice_preset[key],
                            allow_pickle=lowercase,
                        )
                        _lowerCamelCase : List[str] = os.path.join(lowercase, F'''{prompt_key}_{key}.npy''')
                    _lowerCamelCase : Optional[Any] = tmp_dict
            with open(os.path.join(lowercase, lowercase), 'w') as fp:
                json.dump(lowercase, lowercase)
        super().save_pretrained(lowercase, lowercase, **lowercase)

    def A_(self, lowercase=None, **lowercase):
        # Appears to be `_load_voice_preset`: resolve each of the three prompt
        # files for a named preset and load them with numpy.
        _lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset]
        _lowerCamelCase : Any = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].'''
                )
            _lowerCamelCase : Union[str, Any] = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path', '/'),
                voice_preset_paths[key],
                subfolder=kwargs.pop('subfolder', lowercase),
                cache_dir=kwargs.pop('cache_dir', lowercase),
                force_download=kwargs.pop('force_download', lowercase),
                proxies=kwargs.pop('proxies', lowercase),
                resume_download=kwargs.pop('resume_download', lowercase),
                local_files_only=kwargs.pop('local_files_only', lowercase),
                use_auth_token=kwargs.pop('use_auth_token', lowercase),
                revision=kwargs.pop('revision', lowercase),
            )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.'''
                )
            _lowerCamelCase : List[str] = np.load(lowercase)
        return voice_preset_dict

    def A_(self, lowercase=None):
        # Appears to be `_validate_voice_preset_dict`: each prompt must be an
        # ndarray of the rank declared in the class-level shape table.
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''')
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''')
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''')

    def __call__(self, lowercase=None, lowercase=None, lowercase="pt", lowercase=256, lowercase=False, lowercase=True, lowercase=False, **lowercase,):
        # NOTE(review): duplicate parameter names — SyntaxError; presumably
        # (text, voice_preset, return_tensors, max_length, add_special_tokens,
        # return_attention_mask, return_token_type_ids).  Resolves the preset
        # (by name or .npz path), validates it, tokenizes the text, and
        # attaches the preset to the encoding under "history_prompt".
        if voice_preset is not None and not isinstance(lowercase, lowercase):
            if (
                isinstance(lowercase, lowercase)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                _lowerCamelCase : Any = self._load_voice_preset(lowercase)
            else:
                if isinstance(lowercase, lowercase) and not voice_preset.endswith('.npz'):
                    _lowerCamelCase : Optional[Any] = voice_preset + '.npz'
                _lowerCamelCase : Union[str, Any] = np.load(lowercase)
        if voice_preset is not None:
            self._validate_voice_preset_dict(lowercase, **lowercase)
            _lowerCamelCase : Tuple = BatchFeature(data=lowercase, tensor_type=lowercase)
        _lowerCamelCase : Any = self.tokenizer(
            lowercase,
            return_tensors=lowercase,
            padding='max_length',
            max_length=lowercase,
            return_attention_mask=lowercase,
            return_token_type_ids=lowercase,
            add_special_tokens=lowercase,
            **lowercase,
        )
        if voice_preset is not None:
            _lowerCamelCase : Optional[int] = voice_preset
        return encoded_text
12
0
"""simple docstring""" import math def _snake_case ( lowercase__ , lowercase__ ): return math.pow(lowercase__ , 2 ) - a def _snake_case ( lowercase__ ): return 2 * x def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[Any] = 2.0 while start <= a: _lowerCamelCase : List[Any] = math.pow(lowercase__ , 2 ) return start def _snake_case ( lowercase__ , lowercase__ = 9999 , lowercase__ = 0.0_0_0_0_0_0_0_0_0_0_0_0_0_1 ): if a < 0: raise ValueError('math domain error' ) _lowerCamelCase : Optional[Any] = get_initial_point(lowercase__ ) for _ in range(lowercase__ ): _lowerCamelCase : Dict = value _lowerCamelCase : Optional[Any] = value - fx(lowercase__ , lowercase__ ) / fx_derivative(lowercase__ ) if abs(prev_value - value ) < tolerance: return value return value if __name__ == "__main__": from doctest import testmod testmod()
366
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device lowercase__ = False class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCamelCase : Dict = torch.manual_seed(0 ) _lowerCamelCase : Dict = pipe( image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _lowerCamelCase : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
12
0
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowercase__ = TypeVar("""T""") class lowerCAmelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self , lowercase ): _lowerCamelCase : List[str] = data _lowerCamelCase : Node[T] | None = None def __str__( self ): return F'''{self.data}''' class lowerCAmelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self ): _lowerCamelCase : Node[T] | None = None def __iter__( self ): _lowerCamelCase : List[Any] = self.top while node: yield node.data _lowerCamelCase : int = node.next def __str__( self ): return "->".join([str(lowercase ) for item in self] ) def __len__( self ): return len(tuple(iter(self ) ) ) def A_ ( self ): return self.top is None def A_ ( self , lowercase ): _lowerCamelCase : Optional[int] = Node(lowercase ) if not self.is_empty(): _lowerCamelCase : Dict = self.top _lowerCamelCase : Optional[Any] = node def A_ ( self ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , lowercase ) _lowerCamelCase : Dict = self.top _lowerCamelCase : int = self.top.next return pop_node.data def A_ ( self ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def A_ ( self ): _lowerCamelCase : Dict = None if __name__ == "__main__": from doctest import testmod testmod()
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowercase__ = logging.getLogger(__name__) def _snake_case ( lowercase__=2 , lowercase__=3 , lowercase__=16 , lowercase__ = 10 , lowercase__ = 2 ): def get_dataset(lowercase__ ): _lowerCamelCase : List[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(lowercase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) _lowerCamelCase : Optional[int] = get_dataset(lowercase__ ) _lowerCamelCase : int = get_dataset(lowercase__ ) _lowerCamelCase : Optional[int] = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 ) _lowerCamelCase : List[str] = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ): _lowerCamelCase : List[str] = [] for epoch in range(lowercase__ ): # Train quickly model.train() for batch in dataloader: _lowerCamelCase : Dict = batch _lowerCamelCase : Optional[Any] = model(lowercase__ ) _lowerCamelCase : List[Any] = torch.nn.functional.mse_loss(lowercase__ , lowercase__ ) accelerator.backward(lowercase__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self ): super().__init__() _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.randn(1 ) ) _lowerCamelCase : str = nn.Parameter(torch.randn(1 ) ) def A_ ( self , lowercase ): return x * self.a + 
self.b class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _lowerCamelCase : Dict = DummyModel() _lowerCamelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _lowerCamelCase : int = dummy_dataloaders() _lowerCamelCase : Union[str, Any] = ProjectConfiguration(total_limit=1 , project_dir=lowercase , automatic_checkpoint_naming=lowercase ) # Train baseline _lowerCamelCase : int = Accelerator(project_config=lowercase ) _lowerCamelCase : Optional[int] = accelerator.prepare( lowercase , lowercase , lowercase , lowercase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A_ ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _lowerCamelCase : Union[str, Any] = DummyModel() _lowerCamelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _lowerCamelCase : Dict = dummy_dataloaders() # Train baseline _lowerCamelCase : List[str] = Accelerator() _lowerCamelCase : Tuple = accelerator.prepare( lowercase , lowercase , lowercase , lowercase ) # Save initial _lowerCamelCase : str = os.path.join(lowercase , 'initial' ) accelerator.save_state(lowercase ) (_lowerCamelCase) : Union[str, Any] = model.a.item(), model.b.item() _lowerCamelCase : Optional[int] = optimizer.state_dict() _lowerCamelCase : Optional[int] = train(3 , lowercase , lowercase , lowercase , lowercase ) (_lowerCamelCase) : str = model.a.item(), model.b.item() _lowerCamelCase : Tuple = optimizer.state_dict() # Train partially set_seed(42 ) _lowerCamelCase : int = DummyModel() _lowerCamelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _lowerCamelCase : Any = dummy_dataloaders() _lowerCamelCase : Dict = Accelerator() _lowerCamelCase : Optional[int] = accelerator.prepare( lowercase , lowercase , lowercase , lowercase ) accelerator.load_state(lowercase ) 
(_lowerCamelCase) : Optional[Any] = model.a.item(), model.b.item() _lowerCamelCase : List[str] = optimizer.state_dict() self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) _lowerCamelCase : str = train(2 , lowercase , lowercase , lowercase , lowercase ) # Save everything _lowerCamelCase : int = os.path.join(lowercase , 'checkpoint' ) accelerator.save_state(lowercase ) # Load everything back in and make sure all states work accelerator.load_state(lowercase ) test_rands += train(1 , lowercase , lowercase , lowercase , lowercase ) (_lowerCamelCase) : str = model.a.item(), model.b.item() _lowerCamelCase : List[Any] = optimizer.state_dict() self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) def A_ ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _lowerCamelCase : Optional[int] = DummyModel() _lowerCamelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _lowerCamelCase : List[str] = dummy_dataloaders() _lowerCamelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=lowercase ) # Train baseline _lowerCamelCase : str = Accelerator(project_dir=lowercase , project_config=lowercase ) _lowerCamelCase : Dict = accelerator.prepare( lowercase , lowercase , lowercase , lowercase ) # Save initial accelerator.save_state() (_lowerCamelCase) : str = model.a.item(), model.b.item() _lowerCamelCase : Tuple = optimizer.state_dict() _lowerCamelCase : Optional[Any] = train(3 , lowercase , lowercase , lowercase , lowercase ) (_lowerCamelCase) : Optional[Any] = model.a.item(), model.b.item() _lowerCamelCase : List[str] = optimizer.state_dict() # Train partially set_seed(42 ) _lowerCamelCase : str = DummyModel() _lowerCamelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _lowerCamelCase : List[Any] = dummy_dataloaders() _lowerCamelCase 
: Any = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowercase ) _lowerCamelCase : Tuple = Accelerator(project_dir=lowercase , project_config=lowercase ) _lowerCamelCase : Union[str, Any] = accelerator.prepare( lowercase , lowercase , lowercase , lowercase ) accelerator.load_state(os.path.join(lowercase , 'checkpoints' , 'checkpoint_0' ) ) (_lowerCamelCase) : Tuple = model.a.item(), model.b.item() _lowerCamelCase : int = optimizer.state_dict() self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) _lowerCamelCase : List[str] = train(2 , lowercase , lowercase , lowercase , lowercase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(lowercase , 'checkpoints' , 'checkpoint_1' ) ) test_rands += train(1 , lowercase , lowercase , lowercase , lowercase ) (_lowerCamelCase) : str = model.a.item(), model.b.item() _lowerCamelCase : Optional[Any] = optimizer.state_dict() self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = torch.tensor([1, 2, 3] ) _lowerCamelCase : Tuple = torch.tensor([2, 3, 4] ) _lowerCamelCase : Optional[Any] = DummyModel() _lowerCamelCase : Optional[int] = torch.optim.Adam(net.parameters() ) _lowerCamelCase : Optional[int] = Accelerator() with self.assertRaises(lowercase ) as ve: accelerator.register_for_checkpointing(lowercase , lowercase , lowercase , lowercase ) _lowerCamelCase : Optional[int] = str(ve.exception ) self.assertTrue('Item at index 0' in message ) self.assertTrue('Item at index 1' in message ) self.assertFalse('Item at index 2' in message ) self.assertFalse('Item at index 3' in message ) def A_ ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _lowerCamelCase : Tuple = DummyModel() 
_lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _lowerCamelCase : str = torch.optim.lr_scheduler.StepLR(lowercase , step_size=1 , gamma=0.99 ) _lowerCamelCase : int = dummy_dataloaders() _lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowercase ) # Train baseline _lowerCamelCase : Dict = Accelerator(project_dir=lowercase , project_config=lowercase ) _lowerCamelCase : Tuple = accelerator.prepare( lowercase , lowercase , lowercase , lowercase , lowercase ) # Save initial accelerator.save_state() _lowerCamelCase : Optional[Any] = scheduler.state_dict() train(3 , lowercase , lowercase , lowercase , lowercase , lowercase ) self.assertNotEqual(lowercase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(lowercase , 'checkpoints' , 'checkpoint_0' ) ) self.assertEqual(lowercase , scheduler.state_dict() ) def A_ ( self ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) _lowerCamelCase : str = DummyModel() _lowerCamelCase : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowercase , total_limit=2 ) # Train baseline _lowerCamelCase : Optional[int] = Accelerator(project_dir=lowercase , project_config=lowercase ) _lowerCamelCase : int = accelerator.prepare(lowercase ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(lowercase , 'checkpoints' , 'checkpoint_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowercase , 'checkpoints' , 'checkpoint_9' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowercase , 'checkpoints' , 'checkpoint_10' ) ) ) @require_cuda def A_ ( self ): _lowerCamelCase : Dict = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(lowercase , env=os.environ.copy() ) if __name__ == "__main__": lowercase__ = """/tmp/accelerate/state_checkpointing""" 
lowercase__ = DummyModel() lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1E-3) lowercase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowercase__ , lowercase__ = dummy_dataloaders() lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline lowercase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowercase__ , lowercase__ = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: lowercase__ = group["""params"""][0].device break assert param_device.type == accelerator.device.type lowercase__ = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: lowercase__ = group["""params"""][0].device break assert ( param_device.type == torch.device("""cpu""").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: lowercase__ = group["""params"""][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map 
location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
368
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase ): _lowerCamelCase : Dict = question_encoder _lowerCamelCase : List[Any] = generator _lowerCamelCase : Optional[Any] = self.question_encoder def A_ ( self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' ) _lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase ) if config is None: _lowerCamelCase : int = RagConfig.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__( self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.question_encoder def A_ ( self ): _lowerCamelCase : Optional[Any] = 
self.generator def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , lowercase , ) if max_length is None: _lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length _lowerCamelCase : Optional[Any] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _lowerCamelCase : int = self.current_tokenizer.model_max_length _lowerCamelCase : str = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) _lowerCamelCase : int = labels['input_ids'] return model_inputs
12
0
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): if mass < 0: raise ValueError('The mass of a body cannot be negative' ) return 0.5 * mass * abs(lowercase__ ) * abs(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
369
"""simple docstring""" def _snake_case ( lowercase__ = 10 ): if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError('Invalid input' ) _lowerCamelCase : str = 10**n _lowerCamelCase : Union[str, Any] = 28433 * (pow(2 , 7830457 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
12
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase__ = { """configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""], """tokenization_roformer""": ["""RoFormerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""RoFormerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """RoFormerForCausalLM""", """RoFormerForMaskedLM""", """RoFormerForMultipleChoice""", """RoFormerForQuestionAnswering""", """RoFormerForSequenceClassification""", """RoFormerForTokenClassification""", """RoFormerLayer""", """RoFormerModel""", """RoFormerPreTrainedModel""", """load_tf_weights_in_roformer""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRoFormerForCausalLM""", """TFRoFormerForMaskedLM""", """TFRoFormerForMultipleChoice""", """TFRoFormerForQuestionAnswering""", """TFRoFormerForSequenceClassification""", """TFRoFormerForTokenClassification""", """TFRoFormerLayer""", """TFRoFormerModel""", """TFRoFormerPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxRoFormerForMaskedLM""", """FlaxRoFormerForMultipleChoice""", """FlaxRoFormerForQuestionAnswering""", """FlaxRoFormerForSequenceClassification""", """FlaxRoFormerForTokenClassification""", """FlaxRoFormerModel""", """FlaxRoFormerPreTrainedModel""", 
] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
370
"""simple docstring""" import argparse import datetime def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } _lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month _lowerCamelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) _lowerCamelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day _lowerCamelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator _lowerCamelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year _lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation _lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) ) # Start math if m <= 2: _lowerCamelCase : str = y - 1 _lowerCamelCase : Tuple = m + 12 # maths var _lowerCamelCase : int = int(str(lowercase__ )[:2] ) _lowerCamelCase : int = int(str(lowercase__ )[2:] ) _lowerCamelCase : int = int(2.6 * m - 5.3_9 ) _lowerCamelCase : int = int(c / 4 ) _lowerCamelCase : int = int(k / 4 ) _lowerCamelCase : int = int(d + k ) _lowerCamelCase : int = int(t + u + v + x ) _lowerCamelCase : int = int(z - (2 * c) ) _lowerCamelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowercase__ = parser.parse_args() zeller(args.date_input)
12
0
import argparse import os import re import packaging.version lowercase__ = """examples/""" lowercase__ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } lowercase__ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } lowercase__ = """README.md""" def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowerCamelCase : Optional[Any] = f.read() _lowerCamelCase : Union[str, Any] = REPLACE_PATTERNS[pattern] _lowerCamelCase : Dict = replace.replace('VERSION' , lowercase__ ) _lowerCamelCase : int = re_pattern.sub(lowercase__ , lowercase__ ) with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(lowercase__ ) def _snake_case ( lowercase__ ): for folder, directories, fnames in os.walk(lowercase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(lowercase__ , lowercase__ ) , lowercase__ , pattern='examples' ) def _snake_case ( lowercase__ , lowercase__=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(lowercase__ , lowercase__ , lowercase__ ) if not patch: update_version_in_examples(lowercase__ ) def _snake_case ( ): _lowerCamelCase : Union[str, Any] = '🤗 Transformers currently provides the following architectures' _lowerCamelCase : Tuple = '1. 
Want to contribute a new model?' with open(lowercase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowerCamelCase : Union[str, Any] = f.readlines() # Find the start of the list. _lowerCamelCase : List[str] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _lowerCamelCase : List[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): _lowerCamelCase : List[Any] = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(lowercase__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(lowercase__ ) def _snake_case ( ): with open(REPLACE_FILES['init'] , 'r' ) as f: _lowerCamelCase : int = f.read() _lowerCamelCase : int = REPLACE_PATTERNS['init'][0].search(lowercase__ ).groups()[0] return packaging.version.parse(lowercase__ ) def _snake_case ( lowercase__=False ): _lowerCamelCase : Optional[Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: _lowerCamelCase : Any = default_version.base_version elif patch: _lowerCamelCase : List[Any] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: _lowerCamelCase : Optional[Any] = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. _lowerCamelCase : List[str] = input(f'''Which version are you releasing? [{default_version}]''' ) if len(lowercase__ ) == 0: _lowerCamelCase : Any = default_version print(f'''Updating version to {version}.''' ) global_version_update(lowercase__ , patch=lowercase__ ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' 
) clean_main_ref_in_model_list() def _snake_case ( ): _lowerCamelCase : Tuple = get_version() _lowerCamelCase : List[Any] = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' _lowerCamelCase : List[str] = current_version.base_version # Check with the user we got that right. _lowerCamelCase : str = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(lowercase__ ) == 0: _lowerCamelCase : str = dev_version print(f'''Updating version to {version}.''' ) global_version_update(lowercase__ ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") lowercase__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
371
"""simple docstring""" import re def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' ) if match := re.search(lowercase__ , lowercase__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
12
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = BlenderbotConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , ): _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : int = seq_length _lowerCamelCase : int = is_training _lowerCamelCase : List[str] = use_labels _lowerCamelCase : Union[str, Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : int = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : List[Any] = max_position_embeddings _lowerCamelCase : Optional[Any] = eos_token_id _lowerCamelCase : Dict = pad_token_id _lowerCamelCase : Dict = bos_token_id def A_ ( self ): _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = 
tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _lowerCamelCase : int = prepare_blenderbot_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : List[Any] = TFBlenderbotModel(config=lowercase ).get_decoder() _lowerCamelCase : Dict = inputs_dict['input_ids'] _lowerCamelCase : List[Any] = input_ids[:1, :] _lowerCamelCase : List[str] = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Tuple = inputs_dict['head_mask'] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Tuple = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) _lowerCamelCase : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _lowerCamelCase : Dict = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : int = 
model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : Union[str, Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ): if attention_mask is None: _lowerCamelCase : List[Any] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _lowerCamelCase : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _lowerCamelCase : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCamelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCamelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () lowerCamelCase__ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () lowerCamelCase__ = ( { """conversational""": 
TFBlenderbotForConditionalGeneration, """feature-extraction""": TFBlenderbotModel, """summarization""": TFBlenderbotForConditionalGeneration, """text2text-generation""": TFBlenderbotForConditionalGeneration, """translation""": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = False def A_ ( self ): _lowerCamelCase : Optional[Any] = TFBlenderbotModelTester(self ) _lowerCamelCase : List[str] = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ["""My friends are cool but they eat too many carbs."""] lowerCamelCase__ = """facebook/blenderbot-400M-distill""" @cached_property def A_ ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def A_ ( self ): _lowerCamelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A_ ( self ): _lowerCamelCase : Tuple = self.tokenizer(self.src_text , return_tensors='tf' ) _lowerCamelCase : str = self.model.generate( model_inputs.input_ids , ) _lowerCamelCase : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
350
"""simple docstring"""
# Wav2Vec2 + DeepSpeed integration tests (ZeRO stage 2 and stage 3), run as
# subprocesses under the `deepspeed` launcher.
# NOTE(review): identifiers in this chunk are machine-mangled (`lowercase__`,
# `_lowerCamelCase`, `A_`); several bodies reference names the mangling erased
# (`git_repo_path`, `param`, `func`, `models`, `output_dir`, `stages`, ...),
# and the four parameterized test methods all share the name `A_` (each
# shadows the previous). The block cannot run as written — flagged, not fixed.

# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


# Repo `src` tree, three directory levels above this test file.
lowercase__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))  # presumably the path computed above — TODO confirm

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

# Tiny random wav2vec2 checkpoints keep the subprocess runs fast.
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}

lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]


def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    _lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''


# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    # Four run-variants (single/multi-gpu x fp32/fp16), each delegating to
    # run_and_check below.
    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    @require_torch_multi_gpu
    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    @require_torch_multi_gpu
    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    def A_ ( self , lowercase ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
        # Launch one full train run for the given stage/model and sanity-check it.
        _lowerCamelCase : List[str] = models[model]
        _lowerCamelCase : Optional[int] = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
        self.do_checks(lowercase )
        return output_dir

    def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
        # Assemble the `deepspeed <launcher> run_asr.py <args> --deepspeed <cfg>`
        # command line and execute it as a subprocess.
        _lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
        _lowerCamelCase : Any = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split()
        if fpaa:
            args.extend(['--fp16'] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        _lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        _lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        _lowerCamelCase : Dict = self.get_launcher(lowercase )
        _lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(lowercase , env=self.get_env() )
        return output_dir

    def A_ ( self , lowercase=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        _lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
12
0
"""simple docstring"""
# UperNet model configuration.
# NOTE(review): identifiers in this chunk are machine-mangled (`lowercase__`,
# `_lowerCamelCase`, `A_`). Several body references (`backbone_config`,
# `hidden_size`, `output`, ...) no longer match the mangled parameter/target
# names, so this block cannot execute as written — flagged, not silently fixed.
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


# Module-level logger for this configuration module.
lowercase__ = logging.get_logger(__name__)


class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    # Model-type key used by the HF auto-config machinery to route to this class.
    lowerCamelCase__ = """upernet"""

    # NOTE(review): the list default `[1, 2, 3, 6]` is a mutable default argument
    # (shared across calls) — should be a tuple or None-sentinel; left as-is here.
    def __init__( self , lowercase=None , lowercase=512 , lowercase=0.02 , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=384 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
        super().__init__(**lowercase )
        # When no backbone config is supplied, fall back to a default ResNet
        # backbone exposing all four stages.
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            _lowerCamelCase : Any = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(lowercase , lowercase ):
            # A plain dict was passed: resolve the concrete config class from
            # its 'model_type' key and rehydrate it.
            _lowerCamelCase : int = backbone_config.get('model_type' )
            _lowerCamelCase : str = CONFIG_MAPPING[backbone_model_type]
            _lowerCamelCase : List[Any] = config_class.from_dict(lowercase )
        # Store the decode-head / auxiliary-head hyperparameters on the config.
        _lowerCamelCase : List[Any] = backbone_config
        _lowerCamelCase : str = hidden_size
        _lowerCamelCase : Dict = initializer_range
        _lowerCamelCase : int = pool_scales
        _lowerCamelCase : List[str] = use_auxiliary_head
        _lowerCamelCase : Optional[Any] = auxiliary_loss_weight
        _lowerCamelCase : Optional[int] = auxiliary_in_channels
        _lowerCamelCase : Any = auxiliary_channels
        _lowerCamelCase : Dict = auxiliary_num_convs
        _lowerCamelCase : Optional[Any] = auxiliary_concat_input
        _lowerCamelCase : str = loss_ignore_index

    def A_ ( self ):
        # Serialize to a plain dict: deep-copy the attribute dict, expand the
        # nested backbone config, and record this class's model type.
        _lowerCamelCase : str = copy.deepcopy(self.__dict__ )
        _lowerCamelCase : int = self.backbone_config.to_dict()
        _lowerCamelCase : List[str] = self.__class__.model_type
        return output
351
"""simple docstring"""
# Image processor (rescale + symmetric pad to a multiple of `pad_size`).
# NOTE(review): identifiers are machine-mangled; all three methods below share
# the name `A_`, so at class-creation time each definition shadows the previous
# one and only the last survives. Body references (`do_rescale`, `size`,
# `images`, ...) do not match the mangled parameter names either — this block
# cannot run as written; flagged, not silently fixed.
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


# Module-level logger.
lowercase__ = logging.get_logger(__name__)


class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    # Name of the tensor key produced by preprocessing.
    lowerCamelCase__ = ["""pixel_values"""]

    def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
        # Defaults: rescale pixel values by 1/255 and pad each side up to the
        # next multiple of 8.
        super().__init__(**lowercase )
        _lowerCamelCase : Optional[Any] = do_rescale
        _lowerCamelCase : Union[str, Any] = rescale_factor
        _lowerCamelCase : Any = do_pad
        _lowerCamelCase : Optional[int] = pad_size

    def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
        # Thin wrapper over the shared `rescale` transform.
        return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )

    def A_ ( self , lowercase , lowercase , lowercase = None ):
        # Pad bottom/right with 'symmetric' (mirror) padding so both dimensions
        # become multiples of `size`. Note `(old // size + 1) * size` always
        # adds a full extra block when the dimension is already a multiple.
        _lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
        _lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
        _lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
        return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )

    def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
        # Full preprocessing pipeline: resolve per-call overrides against the
        # instance defaults, validate, then rescale -> pad -> channel-format.
        _lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
        _lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        _lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad
        _lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size
        _lowerCamelCase : Dict = make_list_of_images(lowercase )
        if not valid_images(lowercase ):
            raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        _lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images]
        if do_rescale:
            _lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
        if do_pad:
            _lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images]
        _lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
        _lowerCamelCase : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
12
0
# Argument dataclasses for the whole-word-masking MLM fine-tuning script.
# NOTE(review): identifiers are machine-mangled — every dataclass field is
# named `lowerCamelCase__`, so each field declaration shadows the previous one
# and only the last survives; `__post_init__` hooks are both named `A_` and
# reference attributes (`self.config_overrides`, `self.train_file`, ...) that
# the mangled field names no longer define. Flagged, not silently fixed.
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


# Module logger plus the list/tuple of model types that support masked LM.
lowercase__ = logging.getLogger(__name__)
lowercase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowercase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    # Model checkpoint / architecture selection arguments.
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        },
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase )},
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        },
    )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}
    )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""},
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""},
    )
    lowerCamelCase__ = field(
        default="""main""",
        metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""},
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        },
    )

    def A_ ( self ):
        # Guard: --config_overrides only makes sense for from-scratch training.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )


@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    # Dataset / preprocessing arguments.
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}
    )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}
    )
    lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """The input training data file (a text file)."""} )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""},
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""},
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""},
    )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """Overwrite the cached training and evaluation sets"""}
    )
    lowerCamelCase__ = field(
        default=5,
        metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        },
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated. Default to the max input length of the model."""
            )
        },
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """The number of processes to use for the preprocessing."""},
    )
    lowerCamelCase__ = field(
        default=0.15, metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""}
    )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        },
    )

    def A_ ( self ):
        # Validate provided data-file extensions early.
        if self.train_file is not None:
            _lowerCamelCase : List[Any] = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            _lowerCamelCase : Optional[Any] = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
# Entry points for the whole-word-masking MLM script.
# NOTE(review): identifiers are machine-mangled — both functions are named
# `_snake_case` (the second shadows the first), targets are all
# `_lowerCamelCase`, and bodies read names the mangling erased (`dataset`,
# `refs`, `parser`, `training_args`, `model`, `tokenizer`, `trainer`, ...);
# the trailing `main()` calls are also unresolvable. Flagged, not fixed.
def _snake_case ( lowercase__ , lowercase__ ):
    # Attach the per-example Chinese whole-word reference data (one JSON list
    # per non-empty line of the ref file) as a new 'chinese_ref' style column.
    with open(lowercase__ , 'r' , encoding='utf-8' ) as f:
        _lowerCamelCase : Dict = [json.loads(lowercase__ ) for line in f.read().splitlines() if (len(lowercase__ ) > 0 and not line.isspace())]
    assert len(lowercase__ ) == len(lowercase__ )
    _lowerCamelCase : Tuple = {c: dataset[c] for c in dataset.column_names}
    _lowerCamelCase : Tuple = refs
    return Dataset.from_dict(lowercase__ )


def _snake_case ( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    _lowerCamelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _lowerCamelCase : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    _lowerCamelCase : Dict = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , lowercase__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        _lowerCamelCase : List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            # No validation split: carve one out of the train split by percentage.
            _lowerCamelCase : Optional[int] = load_dataset(
                data_args.dataset_name ,
                data_args.dataset_config_name ,
                split=f'''train[:{data_args.validation_split_percentage}%]''' , )
            _lowerCamelCase : List[str] = load_dataset(
                data_args.dataset_name ,
                data_args.dataset_config_name ,
                split=f'''train[{data_args.validation_split_percentage}%:]''' , )
    else:
        _lowerCamelCase : Union[str, Any] = {}
        if data_args.train_file is not None:
            _lowerCamelCase : Union[str, Any] = data_args.train_file
        if data_args.validation_file is not None:
            _lowerCamelCase : List[str] = data_args.validation_file
        _lowerCamelCase : List[str] = data_args.train_file.split('.' )[-1]
        if extension == "txt":
            _lowerCamelCase : int = 'text'
        _lowerCamelCase : Tuple = load_dataset(lowercase__ , data_files=lowercase__ )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _lowerCamelCase : Union[str, Any] = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        _lowerCamelCase : Dict = AutoConfig.from_pretrained(model_args.config_name , **lowercase__ )
    elif model_args.model_name_or_path:
        _lowerCamelCase : List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
    else:
        _lowerCamelCase : List[str] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(f'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(f'''New config: {config}''' )
    _lowerCamelCase : int = {
        'cache_dir': model_args.cache_dir,
        'use_fast': model_args.use_fast_tokenizer,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        _lowerCamelCase : Dict = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ )
    elif model_args.model_name_or_path:
        _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ )
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
    if model_args.model_name_or_path:
        _lowerCamelCase : Optional[int] = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path ,
            from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,
            config=lowercase__ ,
            cache_dir=model_args.cache_dir ,
            revision=model_args.model_revision ,
            use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        _lowerCamelCase : List[Any] = AutoModelForMaskedLM.from_config(lowercase__ )
    model.resize_token_embeddings(len(lowercase__ ) )
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        _lowerCamelCase : Union[str, Any] = datasets['train'].column_names
    else:
        _lowerCamelCase : str = datasets['validation'].column_names
    _lowerCamelCase : List[Any] = 'text' if 'text' in column_names else column_names[0]
    _lowerCamelCase : Optional[Any] = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_function(lowercase__ ):
        # Remove empty lines
        _lowerCamelCase : Optional[Any] = [line for line in examples['text'] if len(lowercase__ ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length )

    _lowerCamelCase : List[Any] = datasets.map(
        lowercase__ ,
        batched=lowercase__ ,
        num_proc=data_args.preprocessing_num_workers ,
        remove_columns=[text_column_name] ,
        load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        _lowerCamelCase : Optional[Any] = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        _lowerCamelCase : Dict = add_chinese_references(
            tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    _lowerCamelCase : int = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        _lowerCamelCase : str = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    _lowerCamelCase : Union[str, Any] = DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    _lowerCamelCase : str = Trainer(
        model=lowercase__ ,
        args=lowercase__ ,
        train_dataset=tokenized_datasets['train'] if training_args.do_train else None ,
        eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None ,
        tokenizer=lowercase__ ,
        data_collator=lowercase__ , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            _lowerCamelCase : Any = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            _lowerCamelCase : Optional[Any] = model_args.model_name_or_path
        else:
            _lowerCamelCase : str = None
        _lowerCamelCase : Optional[int] = trainer.train(resume_from_checkpoint=lowercase__ )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        _lowerCamelCase : List[Any] = os.path.join(training_args.output_dir , 'train_results.txt' )
        if trainer.is_world_process_zero():
            with open(lowercase__ , 'w' ) as writer:
                logger.info('***** Train results *****' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(f''' {key} = {value}''' )
                    writer.write(f'''{key} = {value}\n''' )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
    # Evaluation
    _lowerCamelCase : List[Any] = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        _lowerCamelCase : int = trainer.evaluate()
        _lowerCamelCase : int = math.exp(eval_output['eval_loss'] )
        _lowerCamelCase : int = perplexity
        _lowerCamelCase : Optional[Any] = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(lowercase__ , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in sorted(results.items() ):
                    logger.info(f''' {key} = {value}''' )
                    writer.write(f'''{key} = {value}\n''' )
    return results


def _snake_case ( lowercase__ ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
352
"""simple docstring"""
# Cross-platform single-keystroke input helpers (Windows via msvcrt, POSIX via
# termios/tty raw mode), used to drive an interactive terminal menu.
# NOTE(review): identifiers are machine-mangled; both functions below are named
# `_snake_case` (the second shadows the first), and bodies reference names the
# mangling erased (`ch`, `cha`, `char`, `get_raw_chars`, and KEYMAP keys
# "arrow_begin"/"arrow_end" never defined here) — this block cannot run as
# written; flagged, not silently fixed.
import os
import string
import sys


# Flag OR'd into arrow-key codes to distinguish them from plain characters.
lowercase__ = 1 << 8
# Symbolic name -> key code table.
lowercase__ = {
    """tab""": ord("""\t"""),
    """newline""": ord("""\r"""),
    """esc""": 27,
    """up""": 65 + ARROW_KEY_FLAG,
    """down""": 66 + ARROW_KEY_FLAG,
    """right""": 67 + ARROW_KEY_FLAG,
    """left""": 68 + ARROW_KEY_FLAG,
    """mod_int""": 91,
    """undefined""": sys.maxsize,
    """interrupt""": 3,
    """insert""": 50,
    """delete""": 51,
    """pg_up""": 53,
    """pg_down""": 54,
}

lowercase__ = KEYMAP["""up"""]
lowercase__ = KEYMAP["""left"""]

if sys.platform == "win32":
    # Buffer of translated keystrokes pending delivery, plus the raw
    # Windows two-byte scan-code -> KEYMAP translation table.
    lowercase__ = []
    lowercase__ = {
        B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
        B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
        B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
        B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
        B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
        B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
        B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
        B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
    }

# Register the digit keys 0-9 in the key map.
for i in range(10):
    lowercase__ = ord(str(i))


def _snake_case ( ):
    # Read one raw keystroke, handling the OS differences.
    if os.name == "nt":
        import msvcrt

        _lowerCamelCase : Any = 'mbcs'
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(lowercase__ ) == 0:
            # Read the keystroke
            _lowerCamelCase : str = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                _lowerCamelCase : List[Any] = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    _lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] )
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
                    WIN_CH_BUFFER.append(lowercase__ )
                    # Keys like insert/delete/pg_up/pg_down emit a trailing '~'.
                    if ord(lowercase__ ) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126 ) )
                    _lowerCamelCase : List[Any] = chr(KEYMAP['esc'] )
                except KeyError:
                    _lowerCamelCase : int = cha[1]
            else:
                _lowerCamelCase : Optional[int] = ch.decode(lowercase__ )
        else:
            # Deliver a previously buffered translated keystroke.
            _lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
    elif os.name == "posix":
        import termios
        import tty

        # Switch stdin to raw mode for one read, then always restore the
        # original terminal attributes.
        _lowerCamelCase : List[str] = sys.stdin.fileno()
        _lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ )
        try:
            tty.setraw(lowercase__ )
            _lowerCamelCase : Optional[Any] = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
    return ch


def _snake_case ( ):
    # Decode a keystroke into either a printable character or a KEYMAP code,
    # following an ESC [ <code> escape sequence for arrow keys.
    _lowerCamelCase : int = get_raw_chars()
    if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(lowercase__ ) == KEYMAP["esc"]:
        _lowerCamelCase : Union[str, Any] = get_raw_chars()
        if ord(lowercase__ ) == KEYMAP["mod_int"]:
            _lowerCamelCase : List[Any] = get_raw_chars()
            if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                # Re-apply the arrow flag so callers can tell arrows from text.
                return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
12
0
"""simple docstring"""
# Lazy-import package __init__ for the Nezha model: type checkers see the real
# imports, runtime gets a _LazyModule that defers heavy imports until access.
# NOTE(review): identifiers are machine-mangled — the structure dict and the
# model list are both assigned to `lowercase__`, while `_LazyModule` is handed
# `_import_structure`, a name never bound here; flagged, not silently fixed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it exports.
lowercase__ = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}

# Modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = [
        """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NezhaForNextSentencePrediction""",
        """NezhaForMaskedLM""",
        """NezhaForPreTraining""",
        """NezhaForMultipleChoice""",
        """NezhaForQuestionAnswering""",
        """NezhaForSequenceClassification""",
        """NezhaForTokenClassification""",
        """NezhaModel""",
        """NezhaPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static-analysis branch: real imports so IDEs and type checkers resolve names.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    # Runtime branch: replace this module with a lazy proxy.
    import sys

    lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
353
"""simple docstring""" from typing import Any def _snake_case ( lowercase__ ): if not input_list: return [] _lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list] _lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name lowercase__ = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... 
).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """


# NOTE(review): this chunk appears to have been machine-obfuscated.  Parameter
# lists reuse the single name ``lowercase`` (duplicate arguments — a
# SyntaxError), and every assignment target was renamed to ``_lowerCamelCase``
# while the *uses* kept the original identifiers (e.g. ``new_height``,
# ``latents``, ``hook``), so many references below are unresolved.  The code is
# left byte-identical; comments describe the apparent intent only.


def _snake_case ( lowercase__ , lowercase__ , lowercase__=8 ):
    # Apparent intent: round (height, width) up so each is divisible by
    # scale_factor**2, then return the pair rescaled by scale_factor —
    # presumably the helper ``downscale_height_and_width(height, width,
    # scale_factor=8)`` referenced from ``__call__`` below.  TODO confirm.
    _lowerCamelCase : Optional[int] = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    _lowerCamelCase : str = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    # Looks like a Kandinsky-style ControlNet decoder pipeline (diffusers):
    # registers a unet, a scheduler and a movq VAE, and derives the movq
    # downscale factor from the number of block output channels.
    def __init__( self , lowercase , lowercase , lowercase , ):
        super().__init__()
        self.register_modules(
            unet=lowercase , scheduler=lowercase , movq=lowercase , )
        # presumably self.movq_scale_factor = 2 ** (n_blocks - 1) — TODO confirm
        _lowerCamelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    # Apparent intent: prepare_latents — draw initial noise (or validate the
    # caller-provided latents) and scale by the scheduler's init noise sigma.
    def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
        if latents is None:
            _lowerCamelCase : str = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            _lowerCamelCase : Any = latents.to(lowercase )
        _lowerCamelCase : Tuple = latents * scheduler.init_noise_sigma
        return latents

    # Apparent intent: enable_sequential_cpu_offload — offload unet and movq
    # to CPU via accelerate's ``cpu_offload``.
    def A_ ( self , lowercase=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        _lowerCamelCase : List[str] = torch.device(F'''cuda:{gpu_id}''' )
        _lowerCamelCase : Any = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(lowercase , lowercase )

    # Apparent intent: enable_model_cpu_offload — hook-based offload that
    # moves one sub-model to GPU at a time (requires accelerate >= 0.17.0).
    def A_ ( self , lowercase=0 ):
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        _lowerCamelCase : List[Any] = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=lowercase )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        _lowerCamelCase : Tuple = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            # NOTE(review): upstream this unpacks ``_, hook = cpu_offload_with_hook(...)``
            # and threads ``hook`` through ``prev_module_hook`` — TODO confirm.
            _lowerCamelCase : List[str] = cpu_offload_with_hook(lowercase , lowercase , prev_module_hook=lowercase )
        # We'll offload the last model manually.
        _lowerCamelCase : Union[str, Any] = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def A_ ( self ):
        # Returns the device the unet's accelerate hook executes on, falling
        # back to the pipeline's own device when no hook is installed.
        if not hasattr(self.unet , '_hf_hook' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowercase , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    # Apparent intent: the denoising entry point — classifier-free-guided
    # UNet loop over scheduler timesteps conditioned on image embeddings and a
    # ControlNet ``hint``, followed by movq decoding and output formatting.
    @torch.no_grad()
    @replace_example_docstring(lowercase )
    def __call__( self , lowercase , lowercase , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 100 , lowercase = 4.0 , lowercase = 1 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , ):
        _lowerCamelCase : Union[str, Any] = self._execution_device
        _lowerCamelCase : Optional[int] = guidance_scale > 1.0
        # Lists of embeddings / hints are concatenated into single batches.
        if isinstance(lowercase , lowercase ):
            _lowerCamelCase : int = torch.cat(lowercase , dim=0 )
        if isinstance(lowercase , lowercase ):
            _lowerCamelCase : Union[str, Any] = torch.cat(lowercase , dim=0 )
        if isinstance(lowercase , lowercase ):
            _lowerCamelCase : Any = torch.cat(lowercase , dim=0 )
        _lowerCamelCase : Optional[int] = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # Duplicate conditioning (negative + positive) along the batch dim
            # so one UNet pass serves both halves of CFG.
            _lowerCamelCase : Any = image_embeds.repeat_interleave(lowercase , dim=0 )
            _lowerCamelCase : List[str] = negative_image_embeds.repeat_interleave(lowercase , dim=0 )
            _lowerCamelCase : List[str] = hint.repeat_interleave(lowercase , dim=0 )
            _lowerCamelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase )
            _lowerCamelCase : Union[str, Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase )
        self.scheduler.set_timesteps(lowercase , device=lowercase )
        _lowerCamelCase : int = self.scheduler.timesteps
        _lowerCamelCase : Optional[Any] = self.movq.config.latent_channels
        _lowerCamelCase : Tuple = downscale_height_and_width(lowercase , lowercase , self.movq_scale_factor )
        # create initial latent
        _lowerCamelCase : Any = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
        for i, t in enumerate(self.progress_bar(lowercase ) ):
            # expand the latents if we are doing classifier free guidance
            _lowerCamelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            _lowerCamelCase : Union[str, Any] = {'image_embeds': image_embeds, 'hint': hint}
            _lowerCamelCase : Optional[Any] = self.unet(
                sample=lowercase , timestep=lowercase , encoder_hidden_states=lowercase , added_cond_kwargs=lowercase , return_dict=lowercase , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise / variance, apply the CFG formula to the
                # noise half, then re-attach the conditional variance.
                _lowerCamelCase : int = noise_pred.split(latents.shape[1] , dim=1 )
                _lowerCamelCase : List[Any] = noise_pred.chunk(2 )
                _lowerCamelCase : Optional[int] = variance_pred.chunk(2 )
                _lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                _lowerCamelCase : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the variance channels — drop them.
                _lowerCamelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            _lowerCamelCase : Any = self.scheduler.step(
                lowercase , lowercase , lowercase , generator=lowercase , )[0]
        # post-processing
        _lowerCamelCase : int = self.movq.decode(lowercase , force_not_quantize=lowercase )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map latent-decoder output from [-1, 1] to [0, 1] then to HWC numpy.
            _lowerCamelCase : Dict = image * 0.5 + 0.5
            _lowerCamelCase : Any = image.clamp(0 , 1 )
            _lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            _lowerCamelCase : List[Any] = self.numpy_to_pil(lowercase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowercase )
354
"""Counting sort for integer collections, plus a character-wise string sort.

Fixes applied (the chunk had been name-obfuscated into unrunnable code): both
functions were defined as ``_snake_case`` — the second definition shadowed the
first — while every call site referenced ``counting_sort`` /
``counting_sort_string``; comprehension variables and the module-level script
names were likewise garbled (``chr(lowercase__)``, undefined ``string`` /
``unsorted``).  Names are restored to match the use-sites.
"""


def counting_sort(collection):
    """Return a new list with the integers of ``collection`` in ascending order.

    Stable counting sort: O(n + k) time and O(k) extra space, where k is the
    value range (max - min + 1).  Handles negative numbers and the empty list.

    :param collection: iterable of integers (a list in practice).
    :return: new sorted list; the input is not modified.
    """
    # if the collection is empty, returns empty
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Return ``string`` with its characters sorted by code point."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])


if __name__ == "__main__":
    # Test string sort
    assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"

    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(counting_sort(unsorted))
12
0
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


# NOTE(review): obfuscated chunk — the base class reference ``lowercase`` is
# unresolved (presumably Seq2SeqTrainer), ``__init__`` repeats the parameter
# name ``lowercase`` (duplicate arguments — a SyntaxError), and every local
# was renamed to ``_lowerCamelCase`` while later uses keep the original names
# (``compute_metrics``, ``output``, ``metrics`` …).  Code left byte-identical;
# comments describe apparent intent only.
class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    # Apparent intent: stash eval_examples and a post_process_function for
    # question-answering style post-processing of raw predictions.
    def __init__( self , *lowercase , lowercase=None , lowercase=None , **lowercase ):
        super().__init__(*lowercase , **lowercase )
        _lowerCamelCase : Tuple = eval_examples
        _lowerCamelCase : List[Any] = post_process_function

    # Apparent intent: ``evaluate`` — run the prediction loop with metric
    # computation deferred, post-process, compute/prefix metrics, log them.
    def A_ ( self , lowercase = None , lowercase=None , lowercase = None , lowercase = "eval" , **lowercase , ):
        _lowerCamelCase : str = gen_kwargs.copy()
        # Fall back to the trainer's generation defaults when the caller did
        # not pass max_length / num_beams.
        _lowerCamelCase : Any = (
            gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
        )
        _lowerCamelCase : Optional[Any] = (
            gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
        )
        _lowerCamelCase : List[Any] = gen_kwargs
        _lowerCamelCase : Dict = self.eval_dataset if eval_dataset is None else eval_dataset
        _lowerCamelCase : Optional[int] = self.get_eval_dataloader(lowercase )
        _lowerCamelCase : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        _lowerCamelCase : int = self.compute_metrics
        _lowerCamelCase : List[Any] = None
        _lowerCamelCase : Optional[int] = time.time()
        _lowerCamelCase : Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _lowerCamelCase : List[Any] = eval_loop(
                lowercase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
        finally:
            # Restore the metric function even if the loop raised.
            _lowerCamelCase : Optional[int] = compute_metrics
        _lowerCamelCase : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            _lowerCamelCase : int = self.post_process_function(lowercase , lowercase , lowercase )
            _lowerCamelCase : int = self.compute_metrics(lowercase )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    _lowerCamelCase : Optional[int] = metrics.pop(lowercase )
            metrics.update(output.metrics )
        else:
            _lowerCamelCase : Optional[Any] = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(lowercase )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        _lowerCamelCase : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase )
        return metrics

    # Apparent intent: ``predict`` — same flow as evaluate but over a test
    # dataloader, returning a PredictionOutput with post-processed predictions.
    def A_ ( self , lowercase , lowercase , lowercase=None , lowercase = "test" , **lowercase ):
        _lowerCamelCase : str = gen_kwargs.copy()
        _lowerCamelCase : Any = self.get_test_dataloader(lowercase )
        # Temporarily disable metric computation, we will do it in the loop here.
        _lowerCamelCase : int = self.compute_metrics
        _lowerCamelCase : Tuple = None
        _lowerCamelCase : List[str] = time.time()
        _lowerCamelCase : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            _lowerCamelCase : Dict = eval_loop(
                lowercase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase , metric_key_prefix=lowercase , )
        finally:
            _lowerCamelCase : Tuple = compute_metrics
        _lowerCamelCase : Optional[int] = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                lowercase , lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        _lowerCamelCase : Optional[Any] = self.post_process_function(lowercase , lowercase , lowercase , 'predict' )
        _lowerCamelCase : Optional[int] = self.compute_metrics(lowercase )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                _lowerCamelCase : int = metrics.pop(lowercase )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase )
355
"""simple docstring"""
# CLI that converts an original Stable Diffusion `.ckpt`/`.safetensors`
# checkpoint into the diffusers pipeline layout and saves it to --dump_path.
#
# NOTE(review): obfuscated chunk — every module-level assignment target was
# renamed to ``lowercase__`` (clobbering itself), while later uses reference
# the original names ``parser``, ``args`` and ``pipe``; ``torch.floataa`` is
# presumably a mangled ``torch.float16``.  Code left byte-identical.
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    lowercase__ = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        """--original_config_file""",
        default=None,
        type=str,
        help="""The YAML config file corresponding to the original architecture.""",
    )
    parser.add_argument(
        """--num_in_channels""",
        default=None,
        type=int,
        help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
    )
    parser.add_argument(
        """--scheduler_type""",
        default="""pndm""",
        type=str,
        help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
    )
    parser.add_argument(
        """--pipeline_type""",
        default=None,
        type=str,
        help=(
            """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
            """. If `None` pipeline will be automatically inferred."""
        ),
    )
    parser.add_argument(
        """--image_size""",
        default=None,
        type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
            """ Base. Use 768 for Stable Diffusion v2."""
        ),
    )
    parser.add_argument(
        """--prediction_type""",
        default=None,
        type=str,
        help=(
            """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
            """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
        ),
    )
    parser.add_argument(
        """--extract_ema""",
        action="""store_true""",
        help=(
            """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
            """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. 
EMA weights usually yield"""
            """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
        ),
    )
    parser.add_argument(
        """--upcast_attention""",
        action="""store_true""",
        help=(
            """Whether the attention computation should always be upcasted. This is necessary when running stable"""
            """ diffusion 2.1."""
        ),
    )
    parser.add_argument(
        """--from_safetensors""",
        action="""store_true""",
        help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
    )
    parser.add_argument(
        """--to_safetensors""",
        action="""store_true""",
        help="""Whether to store pipeline in safetensors format or not.""",
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    parser.add_argument(
        """--stable_unclip""",
        type=str,
        default=None,
        required=False,
        help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
    )
    parser.add_argument(
        """--stable_unclip_prior""",
        type=str,
        default=None,
        required=False,
        help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
    )
    parser.add_argument(
        """--clip_stats_path""",
        type=str,
        help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
        required=False,
    )
    parser.add_argument(
        """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
    )
    parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
    parser.add_argument(
        """--vae_path""",
        type=str,
        default=None,
        required=False,
        help="""Set to a path, hub id to an already converted vae to not convert it again.""",
    )
    lowercase__ = parser.parse_args()

    # Perform the actual conversion; every CLI flag maps 1:1 onto a keyword of
    # download_from_original_stable_diffusion_ckpt.
    lowercase__ = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.floataa)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
12
0
"""simple docstring"""
# Argument dataclasses for a CodeParrot-style pipeline (training, evaluation,
# human-eval, preprocessing, tokenizer training, pretokenization, model init),
# as described by each field's ``help`` metadata.
#
# NOTE(review): obfuscated chunk — every field is named ``lowerCamelCase__``
# (each re-declaration clobbers the previous, so only the last field of each
# class survives) and several defaults reference an unresolved name
# ``lowercase`` (presumably True/False/None in the original).  Code left
# byte-identical; see the help strings for each field's intent.
from dataclasses import dataclass, field
from typing import Optional


# Apparent intent: training configuration (model/dataset paths, batch sizes,
# LR schedule, checkpointing).
@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Model name or path of model to be trained."""} )
    lowerCamelCase__ = field(
        default="""./""", metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} )
    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot-clean-train""", metadata={"""help""": """Name or path of training dataset."""} )
    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot-clean-valid""", metadata={"""help""": """Name or path of validation dataset."""} )
    lowerCamelCase__ = field(default=2, metadata={"""help""": """Batch size for training."""} )
    lowerCamelCase__ = field(default=2, metadata={"""help""": """Batch size for evaluation."""} )
    lowerCamelCase__ = field(default=0.1, metadata={"""help""": """Value of weight decay."""} )
    lowerCamelCase__ = field(
        default=1_00_00, metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} )
    lowerCamelCase__ = field(default=2e-4, metadata={"""help""": """Learning rate fo training."""} )
    lowerCamelCase__ = field(default="""cosine""", metadata={"""help""": """Learning rate."""} )
    lowerCamelCase__ = field(
        default=7_50, metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} )
    lowerCamelCase__ = field(
        default=16, metadata={"""help""": """Number of gradient accumulation steps."""} )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} )
    lowerCamelCase__ = field(default=5_00_00, metadata={"""help""": """Maximum number of training steps."""} )
    lowerCamelCase__ = field(
        default=-1, metadata={"""help""": """Maximum number of evaluation steps. 
If -1 the full dataset is evaluated."""} )
    lowerCamelCase__ = field(default=10_24, metadata={"""help""": """Sequence lengths used for training."""} )
    lowerCamelCase__ = field(default=1, metadata={"""help""": """Training seed."""} )
    lowerCamelCase__ = field(
        default=10_24,
        metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""},
    )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} )
    lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """If True the data is pretokenized."""} )


# Apparent intent: perplexity-style evaluation configuration.
@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Model name or path of model to be evaluated."""} )
    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot-clean-valid""", metadata={"""help""": """Name or path of validation dataset."""} )
    lowerCamelCase__ = field(default=2, metadata={"""help""": """Batch size used for evaluation."""} )
    lowerCamelCase__ = field(
        default=-1, metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} )
    lowerCamelCase__ = field(default=10_24, metadata={"""help""": """Length of sequences to be evaluated."""} )
    lowerCamelCase__ = field(default=1, metadata={"""help""": """Random seed used for evaluation."""} )


# Apparent intent: HumanEval generation/execution configuration.
@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Model name or path of model to be evaluated."""} )
    lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """Number of workers used for code evaluation."""} )
    lowerCamelCase__ = field(
        default=lowercase,
        metadata={"""help""": """The number of human-eval tasks to run. 
If not included all tasks are evaluated."""},
    )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """Sample from the language model's output distribution."""} )
    lowerCamelCase__ = field(default=0.2, metadata={"""help""": """Sampling temperature used for generation."""} )
    lowerCamelCase__ = field(default=2_56, metadata={"""help""": """Maximum number of newly generated tokens."""} )
    lowerCamelCase__ = field(default=0, metadata={"""help""": """Top-k parameter used for generation."""} )
    lowerCamelCase__ = field(default=0.95, metadata={"""help""": """Top-p parameter used for nucleus sampling."""} )
    lowerCamelCase__ = field(default=10, metadata={"""help""": """Number of generations to run in parallel."""} )
    lowerCamelCase__ = field(
        default=2_00, metadata={"""help""": """Number of completions to generate for each sample."""} )
    lowerCamelCase__ = field(default=1, metadata={"""help""": """Random seed used for evaluation."""} )
    lowerCamelCase__ = field(
        default="""eval_results.json""", metadata={"""help""": """Random seed used for evaluation."""} )
    lowerCamelCase__ = field(
        default="""0""", metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} )
    lowerCamelCase__ = field(
        default=-1,
        metadata={
            """help""": (
                """Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"""
                """ number corresponds to which GPU device id to run on."""
            )
        },
    )


# Apparent intent: dataset cleaning/filtering configuration.
@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    lowerCamelCase__ = field(
        default=lowercase,
        metadata={
            """help""": """The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available."""
        },
    )
    lowerCamelCase__ = field(
        default="""transformersbook/codeparrot""", metadata={"""help""": """Folder or name of dataset to process."""} )
    lowerCamelCase__ = field(
        default="""codeparrot-clean""", metadata={"""help""": """Folder to save processed processed dataset."""} )
    lowerCamelCase__ = field(
        default=10_00_00, metadata={"""help""": """Number of files to save per JSON output file."""} )
    lowerCamelCase__ = field(default="""content""", metadata={"""help""": """Column containing text data to process."""} )
    lowerCamelCase__ = field(
        default=10_00, metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} )
    lowerCamelCase__ = field(
        default=1_00, metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} )
    lowerCamelCase__ = field(
        default=0.25, metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} )
    lowerCamelCase__ = field(
        default=1.5, metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} )
    lowerCamelCase__ = field(
        default=0.7, metadata={"""help""": """Probability for filtering config, test and uncommon files."""} )
    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot""",
        metadata={"""help""": """Name or path to the tokenizer."""},
    )
    lowerCamelCase__ = field(
        default=lowercase, metadata={"""help""": """If True, near-duplicate samples are removed."""} )
    lowerCamelCase__ = field(
        default=0.85, metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} )


# Apparent intent: tokenizer-training configuration.
@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    lowerCamelCase__ = field(
        default="""gpt2""", metadata={"""help""": """Base tokenizer to build new tokenizer from."""} )
    lowerCamelCase__ = field(
        default="""transformersbook/codeparrot-train""", metadata={"""help""": """Dataset to train tokenizer on."""} )
    lowerCamelCase__ = field(default="""content""", metadata={"""help""": """Column 
containing text data to process."""} )
    lowerCamelCase__ = field(default=20_00_00, metadata={"""help""": """Number of examples to train tokenizer on."""} )
    lowerCamelCase__ = field(
        default=3_27_68, metadata={"""help""": """Number of examples to train the tokenizer on."""} )
    lowerCamelCase__ = field(default="""codeparrot""", metadata={"""help""": """Name of new tokenizer."""} )
    lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """Push saved tokenizer to the hub."""} )


# Apparent intent: pretokenization configuration.
@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Name or path to the tokenizer."""} )
    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot-clean-train""", metadata={"""help""": """Name or path to the dataset to pretokenize."""} )
    lowerCamelCase__ = field(
        default="""tokenized-codeparrot-train""", metadata={"""help""": """Repo name of the pretokenized data."""} )
    lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """Number of workers used for code evaluation."""} )


# Apparent intent: model-initialization configuration.
@dataclass
class lowerCAmelCase__ :
    '''simple docstring'''

    lowerCamelCase__ = field(
        default="""gpt2-large""", metadata={"""help""": """Configuration to use for model initialization."""} )
    lowerCamelCase__ = field(
        default="""codeparrot/codeparrot""", metadata={"""help""": """Tokenizer attached to model."""} )
    lowerCamelCase__ = field(default="""codeparrot""", metadata={"""help""": """Name of the created model."""} )
    lowerCamelCase__ = field(default=lowercase, metadata={"""help""": """Push saved tokenizer to the hub."""} )
356
"""simple docstring"""
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


# NOTE(review): obfuscated chunk — the base class reference ``lowercase`` is
# unresolved (presumably SchedulerCommonTest), every test method was renamed
# to ``A_`` (later defs shadow earlier ones), and locals were renamed to
# ``_lowerCamelCase`` while uses keep the original names (``scheduler``,
# ``model``, ``sample`` …).  Code left byte-identical; comments describe
# apparent intent only.
class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    lowerCamelCase__ = (UnCLIPScheduler,)

    # Apparent intent: build a default scheduler config, overridable per test.
    def A_ ( self , **lowercase ):
        _lowerCamelCase : Any = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**lowercase )
        return config

    # Sweep num_train_timesteps values.
    def A_ ( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase )

    # Sweep variance_type values.
    def A_ ( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=lowercase )

    # Sweep clip_sample on/off.
    def A_ ( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowercase )

    # Sweep clip_sample_range values.
    def A_ ( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=lowercase )

    # Sweep prediction_type values.
    def A_ ( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=lowercase )

    # Forward pass over (time_step, prev_timestep) pairs; prev must precede t.
    def A_ ( self ):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )

    # Reference variance values for the fixed_small_log variance schedule.
    def A_ ( self ):
        _lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
        _lowerCamelCase : str = scheduler_class(**lowercase )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5

    # Reference variance values for the learned_range variance schedule.
    def A_ ( self ):
        _lowerCamelCase : List[str] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
        _lowerCamelCase : int = scheduler_class(**lowercase )
        _lowerCamelCase : List[str] = 0.5
        assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5

    # Full denoising loop over the training timesteps; checks sum/mean of the
    # final sample against reference values.
    def A_ ( self ):
        _lowerCamelCase : List[Any] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCamelCase : Tuple = scheduler_class(**lowercase )
        _lowerCamelCase : Union[str, Any] = scheduler.timesteps
        _lowerCamelCase : Any = self.dummy_model()
        _lowerCamelCase : Optional[Any] = self.dummy_sample_deter
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        for i, t in enumerate(lowercase ):
            # 1. predict noise residual
            _lowerCamelCase : Tuple = model(lowercase , lowercase )
            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
            _lowerCamelCase : Optional[int] = pred_prev_sample
        _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
        _lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3

    # Same loop but with set_timesteps(25), passing explicit prev_timestep.
    def A_ ( self ):
        _lowerCamelCase : Tuple = self.scheduler_classes[0]
        _lowerCamelCase : str = self.get_scheduler_config()
        _lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
        scheduler.set_timesteps(25 )
        _lowerCamelCase : Optional[Any] = scheduler.timesteps
        _lowerCamelCase : Optional[int] = self.dummy_model()
        _lowerCamelCase : Any = self.dummy_sample_deter
        _lowerCamelCase : str = torch.manual_seed(0 )
        for i, t in enumerate(lowercase ):
            # 1. predict noise residual
            _lowerCamelCase : List[Any] = model(lowercase , lowercase )
            if i + 1 == timesteps.shape[0]:
                _lowerCamelCase : Optional[int] = None
            else:
                _lowerCamelCase : List[str] = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : Union[str, Any] = scheduler.step(
                lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
            _lowerCamelCase : List[Any] = pred_prev_sample
        _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
        _lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3

    # Intentionally skipped common-suite hooks (no trained betas / thresholding
    # for this scheduler) — presumably; TODO confirm against the common test.
    def A_ ( self ):
        pass

    def A_ ( self ):
        pass
12
0
"""Check that every transformers config class docstring links a valid checkpoint.

Fixes applied (the chunk had been name-obfuscated into unrunnable code): every
module-level constant was assigned to ``lowercase__`` and every local to
``_lowerCamelCase``/``lowercase__`` while use-sites kept the original names
(``spec``, ``transformers``, ``_re_checkpoint``, ``configs_without_checkpoint``
…), and the function was defined as ``_snake_case`` although ``__main__`` calls
``check_config_docstrings_have_checkpoints``.  Names restored from use-sites.
"""
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    """transformers""",
    os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    """CLIPConfigMixin""",
    """DecisionTransformerConfigMixin""",
    """EncoderDecoderConfigMixin""",
    """RagConfigMixin""",
    """SpeechEncoderDecoderConfigMixin""",
    """VisionEncoderDecoderConfigMixin""",
    """VisionTextDualEncoderConfigMixin""",
}


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing config classes whose docstring has no valid
    ``[name](https://huggingface.co/name)`` checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''')


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
357
"""simple docstring"""
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): obfuscated chunk — both module-level assignments target
# ``lowercase__`` (the logger is clobbered by the checkpoint map), the
# ``__init__`` signature repeats the parameter name ``lowercase`` ~45 times
# (duplicate arguments — a SyntaxError), and every ``self.<attr> = ...``
# assignment became a local ``_lowerCamelCase`` while the right-hand sides and
# later reads (``self.conv_dim``, ``self.conv_stride`` …) keep the original
# attribute names.  Code left byte-identical; the RHS names indicate which
# config attribute each line was meant to set.
lowercase__ = logging.get_logger(__name__)

lowercase__ = {
    """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


# Apparent intent: the Data2VecAudio model configuration (transformers),
# storing feature-extractor conv specs, transformer sizes, SpecAugment
# masking, CTC, adapter and XVector parameters.
class lowerCAmelCase__ ( lowercase ):
    '''simple docstring'''

    lowerCamelCase__ = """data2vec-audio"""

    def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
        super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
        # Transformer / feature-extractor geometry.
        _lowerCamelCase : str = hidden_size
        _lowerCamelCase : str = feat_extract_activation
        _lowerCamelCase : Optional[Any] = list(lowercase )
        _lowerCamelCase : Dict = list(lowercase )
        _lowerCamelCase : Dict = list(lowercase )
        _lowerCamelCase : Optional[Any] = conv_bias
        _lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
        _lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
        _lowerCamelCase : List[Any] = conv_pos_kernel_size
        _lowerCamelCase : Optional[int] = len(self.conv_dim )
        _lowerCamelCase : List[str] = num_hidden_layers
        _lowerCamelCase : Any = intermediate_size
        _lowerCamelCase : List[str] = hidden_act
        _lowerCamelCase : Tuple = num_attention_heads
        # Dropout / regularization knobs.
        _lowerCamelCase : Any = hidden_dropout
        _lowerCamelCase : Union[str, Any] = attention_dropout
        _lowerCamelCase : str = activation_dropout
        _lowerCamelCase : Any = feat_proj_dropout
        _lowerCamelCase : Tuple = final_dropout
        _lowerCamelCase : Union[str, Any] = layerdrop
        _lowerCamelCase : List[Any] = layer_norm_eps
        _lowerCamelCase : Optional[Any] = initializer_range
        _lowerCamelCase : Optional[int] = vocab_size
        _lowerCamelCase : Tuple = use_weighted_layer_sum

        # The three conv spec lists must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        _lowerCamelCase : Optional[Any] = mask_time_prob
        _lowerCamelCase : List[Any] = mask_time_length
        _lowerCamelCase : List[Any] = mask_time_min_masks
        _lowerCamelCase : Tuple = mask_feature_prob
        _lowerCamelCase : Optional[Any] = mask_feature_length
        _lowerCamelCase : Dict = mask_feature_min_masks

        # ctc loss
        _lowerCamelCase : Tuple = ctc_loss_reduction
        _lowerCamelCase : str = ctc_zero_infinity

        # adapter
        _lowerCamelCase : Union[str, Any] = add_adapter
        _lowerCamelCase : List[Any] = adapter_kernel_size
        _lowerCamelCase : Optional[Any] = adapter_stride
        _lowerCamelCase : List[Any] = num_adapter_layers
        _lowerCamelCase : int = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        _lowerCamelCase : Optional[int] = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        _lowerCamelCase : List[str] = list(lowercase )
        _lowerCamelCase : Optional[Any] = list(lowercase )
        _lowerCamelCase : Any = list(lowercase )
        _lowerCamelCase : Optional[Any] = xvector_output_dim

    @property
    def A_ ( self ):
        # Total downsampling factor of the conv feature extractor
        # (product of the per-layer strides).
        return math.prod(self.conv_stride )
12
0
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) _lowerCamelCase : Union[str, Any] = str(bin(lowercase__ ) )[2:] # remove the leading "0b" _lowerCamelCase : List[Any] = str(bin(lowercase__ ) )[2:] # remove the leading "0b" _lowerCamelCase : List[str] = max(len(lowercase__ ) , len(lowercase__ ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
358
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool lowercase__ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", 
"""Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": 
"""kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": 
"""slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """facebook/nllb-200-distilled-600M""" lowerCamelCase__ = ( """This is a tool that translates text from a language to another. 
It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`.""" ) lowerCamelCase__ = """translator""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = LANGUAGE_CODES lowerCamelCase__ = ["""text""", """text""", """text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase , lowercase , lowercase ): if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) _lowerCamelCase : str = self.lang_to_code[src_lang] _lowerCamelCase : int = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase ) def A_ ( self , lowercase ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase )
12
0
import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING lowerCamelCase__ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Dict = AudioClassificationPipeline(model=lowercase , feature_extractor=lowercase ) # test with a raw waveform _lowerCamelCase : Tuple = np.zeros((34000,) ) _lowerCamelCase : str = np.zeros((14000,) ) return audio_classifier, [audioa, audio] def A_ ( self , lowercase , lowercase ): _lowerCamelCase : List[Any] = examples _lowerCamelCase : Tuple = audio_classifier(lowercase ) # by default a model is initialized with num_labels=2 self.assertEqual( lowercase , [ {'score': ANY(lowercase ), 'label': ANY(lowercase )}, {'score': ANY(lowercase ), 'label': ANY(lowercase )}, ] , ) _lowerCamelCase : List[Any] = audio_classifier(lowercase , top_k=1 ) self.assertEqual( lowercase , [ {'score': ANY(lowercase ), 'label': ANY(lowercase )}, ] , ) self.run_torchaudio(lowercase ) @require_torchaudio def A_ ( self , lowercase ): import datasets # test with a local file _lowerCamelCase : Union[str, Any] = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) _lowerCamelCase : Dict = dataset[0]['audio']['array'] _lowerCamelCase : Any = audio_classifier(lowercase ) self.assertEqual( lowercase , [ {'score': ANY(lowercase ), 'label': ANY(lowercase )}, {'score': ANY(lowercase ), 'label': ANY(lowercase )}, ] , ) @require_torch def A_ ( self ): _lowerCamelCase : int = 
'anton-l/wav2vec2-random-tiny-classifier' _lowerCamelCase : List[Any] = pipeline('audio-classification' , model=lowercase ) _lowerCamelCase : Tuple = np.ones((8000,) ) _lowerCamelCase : Optional[Any] = audio_classifier(lowercase , top_k=4 ) _lowerCamelCase : Optional[Any] = [ {'score': 0.08_42, 'label': 'no'}, {'score': 0.08_38, 'label': 'up'}, {'score': 0.08_37, 'label': 'go'}, {'score': 0.08_34, 'label': 'right'}, ] _lowerCamelCase : Tuple = [ {'score': 0.08_45, 'label': 'stop'}, {'score': 0.08_44, 'label': 'on'}, {'score': 0.08_41, 'label': 'right'}, {'score': 0.08_34, 'label': 'left'}, ] self.assertIn(nested_simplify(lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) _lowerCamelCase : Union[str, Any] = {'array': np.ones((8000,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate} _lowerCamelCase : Optional[Any] = audio_classifier(lowercase , top_k=4 ) self.assertIn(nested_simplify(lowercase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def A_ ( self ): import datasets _lowerCamelCase : Optional[Any] = 'superb/wav2vec2-base-superb-ks' _lowerCamelCase : Tuple = pipeline('audio-classification' , model=lowercase ) _lowerCamelCase : List[str] = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' ) _lowerCamelCase : Any = np.array(dataset[3]['speech'] , dtype=np.floataa ) _lowerCamelCase : Dict = audio_classifier(lowercase , top_k=4 ) self.assertEqual( nested_simplify(lowercase , decimals=3 ) , [ {'score': 0.9_81, 'label': 'go'}, {'score': 0.0_07, 'label': 'up'}, {'score': 0.0_06, 'label': '_unknown_'}, {'score': 0.0_01, 'label': 'down'}, ] , ) @require_tf @unittest.skip('Audio classification is not implemented for TF' ) def A_ ( self ): pass
359
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 ) _lowerCamelCase : List[str] = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def A_ ( self , lowercase , lowercase ): for example in examples: _lowerCamelCase : Tuple = video_classifier(lowercase ) self.assertEqual( lowercase , [ {'score': ANY(lowercase ), 'label': ANY(lowercase )}, {'score': ANY(lowercase ), 'label': ANY(lowercase )}, ] , ) @require_torch def A_ ( self ): _lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' _lowerCamelCase : Tuple = VideoMAEFeatureExtractor( size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} ) _lowerCamelCase : Dict = pipeline( 'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 ) _lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , 
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , ) _lowerCamelCase : str = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], ] , ) @require_tf def A_ ( self ): pass
12
0
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , ): _lowerCamelCase : Tuple = coefficient_matrix.shape _lowerCamelCase : Dict = constant_matrix.shape if rowsa != colsa: _lowerCamelCase : Dict = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}''' raise ValueError(lowercase__ ) if colsa != 1: _lowerCamelCase : Any = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}''' raise ValueError(lowercase__ ) if rowsa != rowsa: _lowerCamelCase : Any = ( 'Coefficient and constant matrices dimensions must be nxn and nx1 but ' f'''received {rowsa}x{colsa} and {rowsa}x{colsa}''' ) raise ValueError(lowercase__ ) if len(lowercase__ ) != rowsa: _lowerCamelCase : List[str] = ( 'Number of initial values must be equal to number of rows in coefficient ' f'''matrix but received {len(lowercase__ )} and {rowsa}''' ) raise ValueError(lowercase__ ) if iterations <= 0: raise ValueError('Iterations must be at least 1' ) _lowerCamelCase : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) _lowerCamelCase : Any = table.shape strictly_diagonally_dominant(lowercase__ ) # Iterates the whole matrix for given number of times for _ in range(lowercase__ ): _lowerCamelCase : List[str] = [] for row in range(lowercase__ ): _lowerCamelCase : Optional[int] = 0 for col in range(lowercase__ ): if col == row: _lowerCamelCase : Any = table[row][col] elif col == cols - 1: _lowerCamelCase : Optional[int] = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] _lowerCamelCase : List[str] = (temp + val) / denom new_val.append(lowercase__ ) _lowerCamelCase : List[Any] = new_val return [float(lowercase__ ) for i in new_val] def _snake_case ( lowercase__ ): _lowerCamelCase : Union[str, Any] = table.shape _lowerCamelCase : List[str] = True for i in range(0 , lowercase__ ): 
_lowerCamelCase : str = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('Coefficient matrix is not strictly diagonally dominant' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
360
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase__ = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
12
0
"""simple docstring""" import operator def _snake_case ( lowercase__ , lowercase__ = False , lowercase__ = None ): _lowerCamelCase : Dict = operator.lt if reverse else operator.gt _lowerCamelCase : Optional[int] = solution or [] if not arr: return solution _lowerCamelCase : Optional[int] = [arr.pop(0 )] for i, item in enumerate(lowercase__ ): if _operator(lowercase__ , sublist[-1] ): sublist.append(lowercase__ ) arr.pop(lowercase__ ) # merging sublist into solution list if not solution: solution.extend(lowercase__ ) else: while sublist: _lowerCamelCase : Tuple = sublist.pop(0 ) for i, xx in enumerate(lowercase__ ): if not _operator(lowercase__ , lowercase__ ): solution.insert(lowercase__ , lowercase__ ) break else: solution.append(lowercase__ ) strand_sort(lowercase__ , lowercase__ , lowercase__ ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
361
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): if attention_mask is None: _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = OPTConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : Tuple = pad_token_id _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : 
Optional[int] = embed_dim _lowerCamelCase : List[str] = word_embed_proj_dim _lowerCamelCase : Any = False def A_ ( self ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) _lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase ) _lowerCamelCase : Optional[Any] = inputs_dict['input_ids'] _lowerCamelCase : str = input_ids[:1, :] _lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) _lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : Optional[int] = tf.concat([attention_mask, 
next_attn_mask] , axis=-1 ) _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 10 def A_ ( self ): _lowerCamelCase : int = TFOPTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _lowerCamelCase : Optional[int] = model_class(config=lowercase ) _lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) _lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. _lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing _lowerCamelCase : int = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Optional[Any] = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) _lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Union[str, Any] = False self.assertTrue(lowercase ) def _snake_case ( lowercase__ ): return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = 99 def A_ ( self ): _lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _lowerCamelCase 
: int = input_ids.shape[0] _lowerCamelCase : List[Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' ) _lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): _lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state _lowerCamelCase : Optional[Any] = (1, 11, 512) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[str] = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) _lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): super().setUp() _lowerCamelCase : List[Any] = 'facebook/opt-350m' def A_ ( self ): _lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) _lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) _lowerCamelCase : List[str] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _lowerCamelCase : List[Any] = tokenizer(lowercase , 
return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) _lowerCamelCase : Any = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) _lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self ): _lowerCamelCase : str = 'facebook/opt-125m' _lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : int = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string 
self.assertListEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = 'facebook/opt-350m' _lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase ) _lowerCamelCase : Any = 'left' # use different length sentences to test batching _lowerCamelCase : Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] _lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) _lowerCamelCase : int = inputs['input_ids'] _lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase ) _lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) _lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) _lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def A_ ( self ): _lowerCamelCase : Tuple = 'facebook/opt-350m' _lowerCamelCase : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and 
mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
12
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowercase__ = { """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""], """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""MaskFormerFeatureExtractor"""] lowercase__ = ["""MaskFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """MaskFormerForInstanceSegmentation""", """MaskFormerModel""", """MaskFormerPreTrainedModel""", ] lowercase__ = [ """MaskFormerSwinBackbone""", """MaskFormerSwinModel""", """MaskFormerSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
362
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """philschmid/bart-large-cnn-samsum""" lowerCamelCase__ = ( """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """ """and returns a summary of the text.""" ) lowerCamelCase__ = """summarizer""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = ["""text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase ): return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase )[0] def A_ ( self , lowercase ): return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
12
0
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
363
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) ) _lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : int = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """speech_to_text_2""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , lowercase=10000 , lowercase=6 , lowercase=2048 , lowercase=4 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=256 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=2 , lowercase=True , lowercase=1 , lowercase=0 , lowercase=2 , lowercase=1024 , **lowercase , ): _lowerCamelCase : Any = vocab_size _lowerCamelCase : Optional[Any] = d_model _lowerCamelCase : str = decoder_ffn_dim _lowerCamelCase : Tuple = decoder_layers _lowerCamelCase : str = decoder_attention_heads _lowerCamelCase : Dict = dropout _lowerCamelCase : Union[str, Any] = attention_dropout _lowerCamelCase : Any = activation_dropout _lowerCamelCase : Optional[Any] = activation_function _lowerCamelCase : Dict = init_std _lowerCamelCase : Union[str, Any] = decoder_layerdrop _lowerCamelCase : Any = use_cache _lowerCamelCase : Dict = decoder_layers _lowerCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True _lowerCamelCase : Optional[int] = max_target_positions super().__init__( pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , **lowercase , )
364
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowercase__ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) lowercase__ = [] lowercase__ = [] lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowercase__ = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", """emoji""": True, }, } ] lowercase__ = 0 for log in Path().glob("""*.log"""): lowercase__ = 0 with open(log, """r""") as f: for line in f: lowercase__ = json.loads(line) if line.get("""nodeid""", """""") != "": lowercase__ = line["""nodeid"""] if line.get("""duration""", None) is not None: lowercase__ = F"{line['duration']:.4f}" if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowercase__ = [] log.unlink() lowercase__ = """""" lowercase__ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowercase__ = [] lowercase__ = {} for test in failed_tests: lowercase__ = test[0].split("""::""") lowercase__ = data[0].split("""/""")[-1] if data[0] not in filesafailed: lowercase__ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowercase__ = [test[0] for test in failed_table] lowercase__ = list(set(files)) # Count number of instances in failed_tests lowercase__ = [] for file in 
individual_files: table.append([file, len(filesafailed[file])]) lowercase__ = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: lowercase__ = """Too many failed tests, please see the full report in the Action results.""" lowercase__ = len(err) + 10 lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}" print(F"### {message}") else: lowercase__ = """No failed tests! 🤗""" print(F"## {message}") payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 🤗": lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) lowercase__ = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) lowercase__ = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowercase__ = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowercase__ = row[0] else: lowercase__ = """""" lowercase__ = { 
"""type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
12
0
"""simple docstring""" def _snake_case ( lowercase__ ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection _lowerCamelCase : List[str] = len(lowercase__ ) _lowerCamelCase : List[str] = max(lowercase__ ) _lowerCamelCase : List[str] = min(lowercase__ ) # create the counting array _lowerCamelCase : List[Any] = coll_max + 1 - coll_min _lowerCamelCase : List[Any] = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowercase__ ): _lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1] # create the output collection _lowerCamelCase : Dict = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowercase__ ) ): _lowerCamelCase : Any = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _snake_case ( lowercase__ ): return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt" lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(counting_sort(unsorted))
365
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """AutoTokenizer""" lowerCamelCase__ = ["""tokenizer"""] lowerCamelCase__ = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , lowercase , lowercase=None ): super().__init__(lowercase ) _lowerCamelCase : Optional[int] = speaker_embeddings @classmethod def A_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: _lowerCamelCase : Optional[Any] = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(lowercase , lowercase )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) _lowerCamelCase : List[Any] = None else: with open(lowercase ) as speaker_embeddings_json: _lowerCamelCase : Union[str, Any] = json.load(lowercase ) else: _lowerCamelCase : Tuple = None _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def A_ 
( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase ) _lowerCamelCase : int = {} _lowerCamelCase : List[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase ) _lowerCamelCase : Any = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , ) _lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' ) _lowerCamelCase : Optional[Any] = tmp_dict with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def A_ ( self , lowercase = None , **lowercase ): _lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset] _lowerCamelCase : Any = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) _lowerCamelCase : Union[str, Any] = get_file_from_repo( self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if path is None: raise ValueError( 
F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) _lowerCamelCase : List[str] = np.load(lowercase ) return voice_preset_dict def A_ ( self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _lowerCamelCase : Any = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith('.npz' ): _lowerCamelCase : Optional[Any] = voice_preset + '.npz' _lowerCamelCase : Union[str, Any] = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) _lowerCamelCase : Tuple = BatchFeature(data=lowercase , tensor_type=lowercase ) _lowerCamelCase : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: _lowerCamelCase : Optional[int] = voice_preset return encoded_text
12
0
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration lowercase__ = pytest.mark.integration lowercase__ = {"""comet"""} lowercase__ = importlib.util.find_spec("""fairseq""") is not None lowercase__ = {"""code_eval"""} lowercase__ = os.name == """nt""" lowercase__ = {"""bertscore""", """frugalscore""", """perplexity"""} lowercase__ = importlib.util.find_spec("""transformers""") is not None def _snake_case ( lowercase__ ): @wraps(lowercase__ ) def wrapper(self , lowercase__ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"' ) else: test_case(self , lowercase__ ) return wrapper def _snake_case ( lowercase__ ): @wraps(lowercase__ ) def wrapper(self , lowercase__ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"' ) else: test_case(self , lowercase__ ) return wrapper def _snake_case ( lowercase__ ): @wraps(lowercase__ ) def wrapper(self , lowercase__ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"' ) else: test_case(self , lowercase__ ) return wrapper def _snake_case ( ): _lowerCamelCase : Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowercase, lowercase, lowercase ) @local class lowerCAmelCase__ ( parameterized.TestCase ): '''simple docstring''' lowerCamelCase__ = {} lowerCamelCase__ = None 
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' ) def A_ ( self , lowercase ): _lowerCamelCase : Tuple = '[...]' _lowerCamelCase : int = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowercase ) ).module_path ) _lowerCamelCase : Union[str, Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=lowercase ) # check parameters _lowerCamelCase : Dict = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowercase , metric_module.__name__ ): with self.use_local_metrics(): try: _lowerCamelCase : str = doctest.testmod(lowercase , verbose=lowercase , raise_on_error=lowercase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def A_ ( self , lowercase ): _lowerCamelCase : Union[str, Any] = '[...]' _lowerCamelCase : List[Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('metrics' , lowercase ) ).module_path ) # run doctest with self.use_local_metrics(): _lowerCamelCase : int = doctest.testmod(lowercase , verbose=lowercase , raise_on_error=lowercase ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def A_ ( self , lowercase , lowercase ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowercase ): yield else: yield @contextmanager def A_ ( self ): def load_local_metric(lowercase , *lowercase , **lowercase ): return load_metric(os.path.join('metrics' , lowercase ) , *lowercase , **lowercase ) with patch('datasets.load_metric' ) as mock_load_metric: _lowerCamelCase : List[str] = load_local_metric yield 
@classmethod def A_ ( cls , lowercase ): def wrapper(lowercase ): _lowerCamelCase : Optional[int] = contextmanager(lowercase ) _lowerCamelCase : int = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('bleurt' ) def _snake_case ( lowercase__ ): import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('sv' , '' , '' ) # handle pytest cli flags class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self , lowercase ): assert len(input_dict['input_ids'] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('bleurt.score._create_predictor' ) as mock_create_predictor: _lowerCamelCase : Optional[int] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('bertscore' ) def _snake_case ( lowercase__ ): import torch def bert_cos_score_idf(lowercase__ , lowercase__ , *lowercase__ , **lowercase__ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(lowercase__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('bert_score.scorer.get_model' ), patch( 'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf: _lowerCamelCase : Dict = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('comet' ) def _snake_case ( lowercase__ ): def load_from_checkpoint(lowercase__ ): class lowerCAmelCase__ : '''simple docstring''' def A_ ( self , lowercase , *lowercase , **lowercase ): assert len(lowercase ) == 2 _lowerCamelCase : Dict = [0.19, 0.92] return scores, sum(lowercase ) / len(lowercase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('comet.download_model' ) as mock_download_model: _lowerCamelCase : int = None with 
patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint: _lowerCamelCase : List[Any] = load_from_checkpoint yield def _snake_case ( ): _lowerCamelCase : int = load_metric(os.path.join('metrics' , 'seqeval' ) ) _lowerCamelCase : List[Any] = 'ERROR' _lowerCamelCase : Optional[int] = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(lowercase__ , match=re.escape(lowercase__ ) ): metric.compute(predictions=[] , references=[] , scheme=lowercase__ )
366
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device lowercase__ = False class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCamelCase : Dict = torch.manual_seed(0 ) _lowerCamelCase : Dict = pipe( image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _lowerCamelCase : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
12
0
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def _snake_case ( lowercase__ ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( ): _lowerCamelCase : List[Any] = 2 while True: if is_prime(lowercase__ ): yield num num += 1 def _snake_case ( lowercase__ = 2000000 ): return sum(takewhile(lambda lowercase__ : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"{solution() = }")
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" def _snake_case ( lowercase__ ): _lowerCamelCase : str = [False] * len(lowercase__ ) _lowerCamelCase : Optional[int] = [-1] * len(lowercase__ ) def dfs(lowercase__ , lowercase__ ): _lowerCamelCase : Optional[int] = True _lowerCamelCase : Union[str, Any] = c for u in graph[v]: if not visited[u]: dfs(lowercase__ , 1 - c ) for i in range(len(lowercase__ ) ): if not visited[i]: dfs(lowercase__ , 0 ) for i in range(len(lowercase__ ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph lowercase__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
368
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase ): _lowerCamelCase : Dict = question_encoder _lowerCamelCase : List[Any] = generator _lowerCamelCase : Optional[Any] = self.question_encoder def A_ ( self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' ) _lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase ) if config is None: _lowerCamelCase : int = RagConfig.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__( self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.question_encoder def A_ ( self ): _lowerCamelCase : Optional[Any] = 
self.generator def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , lowercase , ) if max_length is None: _lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length _lowerCamelCase : Optional[Any] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _lowerCamelCase : int = self.current_tokenizer.model_max_length _lowerCamelCase : str = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) _lowerCamelCase : int = labels['input_ids'] return model_inputs
12
0
"""simple docstring""" from math import factorial def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if successes > trials: raise ValueError('successes must be lower or equal to trials' ) if trials < 0 or successes < 0: raise ValueError('the function is defined for non-negative integers' ) if not isinstance(lowercase__ , lowercase__ ) or not isinstance(lowercase__ , lowercase__ ): raise ValueError('the function is defined for non-negative integers' ) if not 0 < prob < 1: raise ValueError('prob has to be in range of 1 - 0' ) _lowerCamelCase : List[str] = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! _lowerCamelCase : str = float(factorial(lowercase__ ) ) coefficient /= factorial(lowercase__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print("""Probability of 2 successes out of 4 trails""") print("""with probability of 0.75 is:""", end=""" """) print(binomial_distribution(2, 4, 0.75))
369
"""simple docstring""" def _snake_case ( lowercase__ = 10 ): if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError('Invalid input' ) _lowerCamelCase : str = 10**n _lowerCamelCase : Union[str, Any] = 28433 * (pow(2 , 7830457 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
12
0
"""simple docstring""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger(__name__) def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = torch.load(lowercase__ , map_location='cpu' ) if "model" in sd.keys(): _lowerCamelCase : Tuple = torch.load(lowercase__ , map_location='cpu' )['model'] # pop unnecessary weights _lowerCamelCase : int = [ 'decoder.version', 'decoder.output_projection.weight', ] for key in keys_to_delete: if key in sd: sd.pop(lowercase__ ) _lowerCamelCase : List[Any] = { 'decoder.project_in_dim.weight': 'decoder.project_in.weight', 'decoder.project_out_dim.weight': 'decoder.project_out.weight', 'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight', 'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: _lowerCamelCase : int = sd.pop(lowercase__ ) _lowerCamelCase : Union[str, Any] = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: _lowerCamelCase : Any = sd[key] # We split QKV in separate Q,K,V _lowerCamelCase : List[str] = key.replace('.qkv_proj.' , '.q_proj.' ) _lowerCamelCase : Optional[int] = key.replace('.qkv_proj.' , '.k_proj.' ) _lowerCamelCase : List[Any] = key.replace('.qkv_proj.' , '.v_proj.' 
) _lowerCamelCase : str = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 _lowerCamelCase : Union[str, Any] = torch.split(lowercase__ , depth // 3 , dim=0 ) _lowerCamelCase : Any = q _lowerCamelCase : Optional[Any] = k _lowerCamelCase : Tuple = v del sd[key] return sd @torch.no_grad() def _snake_case ( lowercase__ , lowercase__ , lowercase__=None ): _lowerCamelCase : Optional[Any] = load_checkpoint(lowercase__ ) if config is not None: _lowerCamelCase : str = OPTConfig.from_pretrained(lowercase__ ) else: _lowerCamelCase : Dict = OPTConfig() _lowerCamelCase : Any = OPTModel(lowercase__ ).half().eval() model.load_state_dict(lowercase__ ) # Check results Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) model.save_pretrained(lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--fairseq_path""", type=str, help=( """path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:""" """ https://huggingface.co/models?other=opt_metasq""" ), ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""") lowercase__ = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
370
"""simple docstring""" import argparse import datetime def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } _lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month _lowerCamelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) _lowerCamelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day _lowerCamelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator _lowerCamelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year _lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation _lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) ) # Start math if m <= 2: _lowerCamelCase : str = y - 1 _lowerCamelCase : Tuple = m + 12 # maths var _lowerCamelCase : int = int(str(lowercase__ )[:2] ) _lowerCamelCase : int = int(str(lowercase__ )[2:] ) _lowerCamelCase : int = int(2.6 * m - 5.3_9 ) _lowerCamelCase : int = int(c / 4 ) _lowerCamelCase : int = int(k / 4 ) _lowerCamelCase : int = int(d + k ) _lowerCamelCase : int = int(t + u + v + x ) _lowerCamelCase : int = int(z - (2 * c) ) _lowerCamelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowercase__ = parser.parse_args() zeller(args.date_input)
12
0
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCAmelCase__ ( lowercase, lowercase ): '''simple docstring''' @register_to_config def __init__( self , lowercase , lowercase = None , lowercase = None ): super().__init__() _lowerCamelCase : Dict = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _lowerCamelCase : int = torch.zeros(lowercase , lowercase ) else: _lowerCamelCase : Tuple = None _lowerCamelCase : List[str] = torch.nn.Parameter(lowercase ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ): super().__init__() self.register_modules( vqvae=lowercase , transformer=lowercase , text_encoder=lowercase , tokenizer=lowercase , scheduler=lowercase , learned_classifier_free_sampling_embeddings=lowercase , ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Tuple = len(lowercase ) if isinstance(lowercase , lowercase ) else 1 # get prompt text embeddings _lowerCamelCase : Tuple = self.tokenizer( lowercase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) _lowerCamelCase : Optional[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _lowerCamelCase : Tuple = 
self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _lowerCamelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] _lowerCamelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _lowerCamelCase : Optional[int] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowercase ) # duplicate text embeddings for each generation per prompt _lowerCamelCase : Optional[int] = prompt_embeds.repeat_interleave(lowercase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _lowerCamelCase : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings _lowerCamelCase : Optional[int] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase , 1 , 1 ) else: _lowerCamelCase : Optional[int] = [''] * batch_size _lowerCamelCase : Optional[int] = text_input_ids.shape[-1] _lowerCamelCase : List[Any] = self.tokenizer( lowercase , padding='max_length' , max_length=lowercase , truncation=lowercase , return_tensors='pt' , ) _lowerCamelCase : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _lowerCamelCase : Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowercase ) # duplicate unconditional embeddings for each generation per prompt, using 
mps friendly method _lowerCamelCase : List[str] = negative_prompt_embeds.shape[1] _lowerCamelCase : int = negative_prompt_embeds.repeat(1 , lowercase , 1 ) _lowerCamelCase : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCamelCase : str = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , lowercase , lowercase = 100 , lowercase = 5.0 , lowercase = 1.0 , lowercase = 1 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , ): if isinstance(lowercase , lowercase ): _lowerCamelCase : str = 1 elif isinstance(lowercase , lowercase ): _lowerCamelCase : List[Any] = len(lowercase ) else: raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(lowercase )}''' ) _lowerCamelCase : str = batch_size * num_images_per_prompt _lowerCamelCase : List[Any] = guidance_scale > 1.0 _lowerCamelCase : List[str] = self._encode_prompt(lowercase , lowercase , lowercase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase )}.''' ) # get the initial completely masked latents unless the user supplied it _lowerCamelCase : Tuple = (batch_size, self.transformer.num_latent_pixels) if latents is None: _lowerCamelCase : Tuple = self.transformer.num_vector_embeds - 1 _lowerCamelCase : List[Any] = torch.full(lowercase , lowercase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= 
self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,' F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _lowerCamelCase : List[str] = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(lowercase , device=self.device ) _lowerCamelCase : int = self.scheduler.timesteps.to(self.device ) _lowerCamelCase : Tuple = latents for i, t in enumerate(self.progress_bar(lowercase ) ): # expand the sample if we are doing classifier free guidance _lowerCamelCase : Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _lowerCamelCase : str = self.transformer(lowercase , encoder_hidden_states=lowercase , timestep=lowercase ).sample if do_classifier_free_guidance: _lowerCamelCase : str = model_output.chunk(2 ) _lowerCamelCase : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(lowercase , dim=1 , keepdim=lowercase ) _lowerCamelCase : Dict = self.truncate(lowercase , lowercase ) # remove `log(0)`'s (`-inf`s) _lowerCamelCase : Optional[int] = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _lowerCamelCase : Any = self.scheduler.step(lowercase , timestep=lowercase , sample=lowercase , generator=lowercase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase , lowercase , lowercase ) _lowerCamelCase : Optional[int] = self.vqvae.config.vq_embed_dim _lowerCamelCase : Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _lowerCamelCase : Optional[int] = self.vqvae.quantize.get_codebook_entry(lowercase , shape=lowercase ) _lowerCamelCase : int = self.vqvae.decode(lowercase , force_not_quantize=lowercase ).sample _lowerCamelCase : Any = (image / 2 + 0.5).clamp(0 , 1 ) 
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCamelCase : Union[str, Any] = self.numpy_to_pil(lowercase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase ) def A_ ( self , lowercase , lowercase ): _lowerCamelCase : int = torch.sort(lowercase , 1 , descending=lowercase ) _lowerCamelCase : int = torch.exp(lowercase ) _lowerCamelCase : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _lowerCamelCase : List[str] = torch.full_like(keep_mask[:, 0:1, :] , lowercase ) _lowerCamelCase : Dict = torch.cat((all_true, keep_mask) , dim=1 ) _lowerCamelCase : List[Any] = keep_mask[:, :-1, :] _lowerCamelCase : str = keep_mask.gather(1 , indices.argsort(1 ) ) _lowerCamelCase : List[str] = log_p_x_0.clone() _lowerCamelCase : List[str] = -torch.inf # -inf = log(0) return rv
371
"""simple docstring""" import re def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' ) if match := re.search(lowercase__ , lowercase__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
12
0
"""simple docstring""" import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowercase , lowercase=2 , lowercase=56 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=2 , lowercase=7 , lowercase="gelu_new" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=4 , lowercase="block_sparse" , lowercase=True , lowercase=False , lowercase=2 , lowercase=3 , ): _lowerCamelCase : Any = parent _lowerCamelCase : int = batch_size _lowerCamelCase : int = seq_length _lowerCamelCase : List[Any] = is_training _lowerCamelCase : Any = use_attention_mask _lowerCamelCase : Dict = use_token_type_ids _lowerCamelCase : str = use_labels _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Tuple = hidden_size _lowerCamelCase : Dict = num_hidden_layers _lowerCamelCase : Optional[Any] = num_attention_heads _lowerCamelCase : List[str] = intermediate_size _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = max_position_embeddings _lowerCamelCase : int = type_vocab_size _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : Optional[int] = initializer_range _lowerCamelCase : int = num_choices _lowerCamelCase : Optional[Any] = rescale_embeddings 
_lowerCamelCase : str = attention_type _lowerCamelCase : Optional[Any] = use_bias _lowerCamelCase : Optional[Any] = block_size _lowerCamelCase : List[str] = num_random_blocks def A_ ( self ): _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCamelCase : int = None if self.use_attention_mask: _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCamelCase : Optional[Any] = None if self.use_token_type_ids: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCamelCase : Union[str, Any] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def A_ ( self ): _lowerCamelCase : Dict = self.prepare_config_and_inputs() _lowerCamelCase : List[Any] = config_and_inputs _lowerCamelCase : Optional[Any] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask, } return config, inputs_dict @require_flax class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, 
FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) lowerCamelCase__ = False lowerCamelCase__ = False def A_ ( self ): _lowerCamelCase : Tuple = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def A_ ( self ): super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def A_ ( self ): super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def A_ ( self ): super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def A_ ( self ): super().test_hidden_states_output() @slow def A_ ( self ): for model_class_name in self.all_model_classes: _lowerCamelCase : Optional[Any] = model_class_name.from_pretrained('google/bigbird-roberta-base' ) self.assertIsNotNone(lowercase ) def A_ ( self ): if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def A_ ( self ): _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowerCamelCase : int = self._prepare_for_class(lowercase , lowercase ) _lowerCamelCase : Optional[Any] = model_class(lowercase ) @jax.jit def model_jitted(lowercase , lowercase=None , **lowercase ): return model(input_ids=lowercase , attention_mask=lowercase , **lowercase ) with self.subTest('JIT Enabled' ): _lowerCamelCase : Tuple = model_jitted(**lowercase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowerCamelCase : List[Any] = model_jitted(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) ) for jitted_output, output in zip(lowercase , lowercase ): 
self.assertEqual(jitted_output.shape , output.shape ) def A_ ( self , lowercase , lowercase , lowercase , lowercase=1E-5 , lowercase="outputs" , lowercase=None ): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith('outputs.attentions' ): return else: super().check_pt_flax_outputs(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
350
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowercase__ = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} lowercase__ = """zero2""" lowercase__ = """zero3""" lowercase__ = [ZEROa, ZEROa] def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param _lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test lowercase__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu 
@parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) def A_ ( self , lowercase ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = models[model] _lowerCamelCase : Optional[int] = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , ) self.do_checks(lowercase ) return output_dir def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase ) _lowerCamelCase : Any = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['--fp16'] ) # currently 
ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _lowerCamelCase : Dict = self.get_launcher(lowercase ) _lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase , env=self.get_env() ) return output_dir def A_ ( self , lowercase=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
12
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer lowercase__ = logging.get_logger(__name__) lowercase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase__ = { """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( 
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } lowercase__ = { """google/realm-cc-news-pretrained-embedder""": 512, """google/realm-cc-news-pretrained-encoder""": 512, """google/realm-cc-news-pretrained-scorer""": 512, """google/realm-cc-news-pretrained-openqa""": 512, """google/realm-orqa-nq-openqa""": 512, """google/realm-orqa-nq-reader""": 512, """google/realm-orqa-wq-openqa""": 512, """google/realm-orqa-wq-reader""": 512, } lowercase__ = { """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, """google/realm-orqa-wq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": {"""do_lower_case""": True}, } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = RealmTokenizer 
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ): super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) _lowerCamelCase : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , lowercase ) != do_lower_case or normalizer_state.get('strip_accents' , lowercase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , lowercase ) != tokenize_chinese_chars ): _lowerCamelCase : List[str] = getattr(lowercase , normalizer_state.pop('type' ) ) _lowerCamelCase : str = do_lower_case _lowerCamelCase : Tuple = strip_accents _lowerCamelCase : Union[str, Any] = tokenize_chinese_chars _lowerCamelCase : int = normalizer_class(**lowercase ) _lowerCamelCase : str = do_lower_case def A_ ( self , lowercase , **lowercase ): _lowerCamelCase : Tuple = PaddingStrategy.MAX_LENGTH _lowerCamelCase : List[str] = text _lowerCamelCase : int = kwargs.pop('text_pair' , lowercase ) _lowerCamelCase : Tuple = kwargs.pop('return_tensors' , lowercase ) _lowerCamelCase : Union[str, Any] = { 'input_ids': [], 'attention_mask': [], 'token_type_ids': [], } for idx, candidate_text in enumerate(lowercase ): if batch_text_pair is not None: _lowerCamelCase : List[Any] = batch_text_pair[idx] else: _lowerCamelCase : Dict = None _lowerCamelCase : List[str] = super().__call__(lowercase , lowercase , return_tensors=lowercase , **lowercase ) _lowerCamelCase : int = encoded_candidates.get('input_ids' ) _lowerCamelCase : Any = encoded_candidates.get('attention_mask' ) _lowerCamelCase : Optional[int] = encoded_candidates.get('token_type_ids' ) if encoded_input_ids is not 
None: output_data["input_ids"].append(lowercase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(lowercase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(lowercase ) _lowerCamelCase : str = {key: item for key, item in output_data.items() if len(lowercase ) != 0} return BatchEncoding(lowercase , tensor_type=lowercase ) def A_ ( self , lowercase , lowercase=None ): _lowerCamelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self , lowercase , lowercase = None ): _lowerCamelCase : str = [self.sep_token_id] _lowerCamelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self , lowercase , lowercase = None ): _lowerCamelCase : Any = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
351
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : Optional[Any] = do_rescale _lowerCamelCase : Union[str, Any] = rescale_factor _lowerCamelCase : Any = do_pad _lowerCamelCase : Optional[int] = pad_size def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = None ): _lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase ) _lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height _lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase ) def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): _lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad _lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size _lowerCamelCase : Dict = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise 
ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. _lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images] if do_rescale: _lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_pad: _lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images] _lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] _lowerCamelCase : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase )
12
0
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a WMT translation pair and dump it as line-aligned text files.

    Writes ``<split>.source`` / ``<split>.target`` files (one sentence per
    line) under ``save_dir``, using ``val`` for the ``validation`` split to
    match summarization-dataset conventions.

    Args:
        src_lang: Source language code (e.g. ``"ro"``).
        tgt_lang: Target language code (e.g. ``"en"``).
        dataset: ``datasets`` hub name of the corpus (e.g. ``"wmt16"``).
        save_dir: Output directory; defaults to ``"<dataset>-<src>-<tgt>"``.

    Raises:
        ImportError: If the ``datasets`` package is not installed.
    """
    try:
        import datasets  # deferred so the CLI gives a clear install hint
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        # Context managers ensure both files are flushed and closed (the
        # original left them open). Reading is the bottleneck, so writing one
        # record at a time doesn't slow things down.
        with src_path.open("w+") as src_fp, tgt_path.open("w+") as tgt_fp:
            for x in tqdm(ds[split]):
                ex = x["translation"]
                src_fp.write(ex[src_lang] + "\n")
                tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
352
"""simple docstring""" import os import string import sys lowercase__ = 1 << 8 lowercase__ = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } lowercase__ = KEYMAP["""up"""] lowercase__ = KEYMAP["""left"""] if sys.platform == "win32": lowercase__ = [] lowercase__ = { B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): lowercase__ = ord(str(i)) def _snake_case ( ): if os.name == "nt": import msvcrt _lowerCamelCase : Any = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowercase__ ) == 0: # Read the keystroke _lowerCamelCase : str = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCamelCase : List[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(lowercase__ ) if ord(lowercase__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _lowerCamelCase : List[Any] = chr(KEYMAP['esc'] ) except KeyError: _lowerCamelCase : int = cha[1] else: _lowerCamelCase : Optional[int] = ch.decode(lowercase__ ) else: _lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": 
import termios import tty _lowerCamelCase : List[str] = sys.stdin.fileno() _lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ ) try: tty.setraw(lowercase__ ) _lowerCamelCase : Optional[Any] = sys.stdin.read(1 ) finally: termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ ) return ch def _snake_case ( ): _lowerCamelCase : int = get_raw_chars() if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowercase__ ) == KEYMAP["esc"]: _lowerCamelCase : Union[str, Any] = get_raw_chars() if ord(lowercase__ ) == KEYMAP["mod_int"]: _lowerCamelCase : List[Any] = get_raw_chars() if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowercase__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
12
0
"""simple docstring""" def _snake_case ( lowercase__ ): """simple docstring""" return credit_card_number.startswith(('34', '35', '37', '4', '5', '6') ) def _snake_case ( lowercase__ ): """simple docstring""" _lowerCamelCase : Any = credit_card_number _lowerCamelCase : str = 0 _lowerCamelCase : Tuple = len(lowercase__ ) - 2 for i in range(lowercase__ , -1 , -2 ): # double the value of every second digit _lowerCamelCase : List[Any] = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 _lowerCamelCase : Union[str, Any] = cc_number[:i] + str(lowercase__ ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(lowercase__ ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def _snake_case ( lowercase__ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = f'''{credit_card_number} is an invalid credit card number because''' if not credit_card_number.isdigit(): print(f'''{error_message} it has nonnumerical characters.''' ) return False if not 13 <= len(lowercase__ ) <= 16: print(f'''{error_message} of its length.''' ) return False if not validate_initial_digits(lowercase__ ): print(f'''{error_message} of its first two digits.''' ) return False if not luhn_validation(lowercase__ ): print(f'''{error_message} it fails the Luhn check.''' ) return False print(f'''{credit_card_number} is a valid credit card number.''' ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("""4111111111111111""") validate_credit_card_number("""32323""")
353
"""simple docstring""" from typing import Any def _snake_case ( lowercase__ ): if not input_list: return [] _lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list] _lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, 
wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
354
"""simple docstring""" def _snake_case ( lowercase__ ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection _lowerCamelCase : List[str] = len(lowercase__ ) _lowerCamelCase : List[str] = max(lowercase__ ) _lowerCamelCase : List[str] = min(lowercase__ ) # create the counting array _lowerCamelCase : List[Any] = coll_max + 1 - coll_min _lowerCamelCase : List[Any] = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowercase__ ): _lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1] # create the output collection _lowerCamelCase : Dict = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowercase__ ) ): _lowerCamelCase : Any = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _snake_case ( lowercase__ ): return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt" lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(counting_sort(unsorted))
12
0
"""simple docstring""" import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _snake_case ( lowercase__ ): # picklable for multiprocessing return x.sum() def _snake_case ( lowercase__ ): # picklable for multiprocessing return i + 1 @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Union[str, Any] = {} _lowerCamelCase : str = [] _lowerCamelCase : List[Any] = 1 _lowerCamelCase : List[str] = [1, 2] _lowerCamelCase : List[str] = {'a': 1, 'b': 2} _lowerCamelCase : int = {'a': [1, 2], 'b': [3, 4]} _lowerCamelCase : Dict = {'a': {'1': 1}, 'b': 2} _lowerCamelCase : Union[str, Any] = {'a': 1, 'b': 2, 'c': 3, 'd': 4} _lowerCamelCase : List[str] = {} _lowerCamelCase : Dict = [] _lowerCamelCase : List[Any] = 2 _lowerCamelCase : Tuple = [2, 3] _lowerCamelCase : List[str] = {'a': 2, 'b': 3} _lowerCamelCase : Optional[int] = {'a': [2, 3], 'b': [4, 5]} _lowerCamelCase : Optional[Any] = {'a': {'1': 2}, 'b': 3} _lowerCamelCase : List[Any] = {'a': 2, 'b': 3, 'c': 4, 'd': 5} self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase ) , lowercase ) _lowerCamelCase : 
Union[str, Any] = 2 self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual(map_nested(lowercase , lowercase , num_proc=lowercase ) , lowercase ) _lowerCamelCase : Tuple = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )} _lowerCamelCase : Any = {'a': 2, 'b': 0, 'c': 2} _lowerCamelCase : List[Any] = { 'a': np.eye(2 ).astype(lowercase ), 'b': np.zeros(3 ).astype(lowercase ), 'c': np.ones(2 ).astype(lowercase ), } self.assertEqual(map_nested(lowercase , lowercase , map_numpy=lowercase ) , lowercase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(lowercase , lowercase , map_numpy=lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(lowercase , lowercase , map_numpy=lowercase , num_proc=lowercase ) , lowercase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(lowercase , lowercase , map_numpy=lowercase , num_proc=lowercase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(lowercase ): # can't pickle a local lambda map_nested(lambda lowercase : x + 1 , lowercase , num_proc=lowercase ) def A_ ( self ): _lowerCamelCase : Tuple = {'a': 1, 'b': 2} _lowerCamelCase : Tuple = {'a': 3, 'b': 4} _lowerCamelCase : Union[str, Any] = {'a': 5, 'b': 6} _lowerCamelCase : Optional[Any] = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(lowercase , lowercase 
, lowercase ) ) , lowercase ) def A_ ( self ): class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = """bar""" _lowerCamelCase : Any = Foo() self.assertEqual(foo.my_attr , 'bar' ) with temporary_assignment(lowercase , 'my_attr' , 'BAR' ): self.assertEqual(foo.my_attr , 'BAR' ) self.assertEqual(foo.my_attr , 'bar' ) @pytest.mark.parametrize( 'iterable_length, num_proc, expected_num_proc' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch( 'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool: _lowerCamelCase : Tuple = {f'''{i}''': i for i in range(lowercase__ )} _lowerCamelCase : Dict = map_nested(lambda lowercase__ : x + 10 , lowercase__ , num_proc=lowercase__ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @require_tf def A_ ( self ): import tensorflow as tf from tensorflow.keras import layers _lowerCamelCase : List[Any] = layers.Dense(2 ) def gen_random_output(): _lowerCamelCase : List[Any] = tf.random.uniform((1, 3) ) return model(lowercase ).numpy() with temp_seed(42 , set_tensorflow=lowercase ): _lowerCamelCase : Union[str, Any] = gen_random_output() with temp_seed(42 , set_tensorflow=lowercase ): _lowerCamelCase : Optional[Any] = gen_random_output() _lowerCamelCase : Union[str, Any] = gen_random_output() np.testing.assert_equal(lowercase , lowercase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def A_ ( self ): import torch def gen_random_output(): _lowerCamelCase : Any = 
torch.nn.Linear(3 , 2 ) _lowerCamelCase : str = torch.rand(1 , 3 ) return model(lowercase ).detach().numpy() with temp_seed(42 , set_pytorch=lowercase ): _lowerCamelCase : Any = gen_random_output() with temp_seed(42 , set_pytorch=lowercase ): _lowerCamelCase : List[str] = gen_random_output() _lowerCamelCase : Union[str, Any] = gen_random_output() np.testing.assert_equal(lowercase , lowercase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def A_ ( self ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): _lowerCamelCase : List[str] = gen_random_output() with temp_seed(42 ): _lowerCamelCase : List[Any] = gen_random_output() _lowerCamelCase : Dict = gen_random_output() np.testing.assert_equal(lowercase , lowercase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('input_data' , [{}] ) def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = NestedDataStructure(lowercase__ ).data assert output_data == input_data @pytest.mark.parametrize( 'data, expected_output' , [ ({}, []), ([], []), ('foo', ['foo']), (['foo', 'bar'], ['foo', 'bar']), ([['foo', 'bar']], ['foo', 'bar']), ([[['foo'], ['bar']]], ['foo', 'bar']), ([[['foo'], 'bar']], ['foo', 'bar']), ({'a': 1, 'b': 2}, [1, 2]), ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]), ({'a': {'1': 1}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': [2]}, [1, 2]), ] , ) def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : str = NestedDataStructure(lowercase__ ).flatten() assert output == expected_output def _snake_case ( ): _lowerCamelCase : Tuple = A(x=1 , y='foobar' ) _lowerCamelCase : List[str] = {'x': 1, 'y': 'foobar'} assert 
asdict(lowercase__ ) == expected_output _lowerCamelCase : Dict = {'a': {'b': A(x=10 , y='foo' )}, 'c': [A(x=20 , y='bar' )]} _lowerCamelCase : int = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]} assert asdict(lowercase__ ) == expected_output with pytest.raises(lowercase__ ): asdict([1, A(x=10 , y='foo' )] ) def _snake_case ( lowercase__ ): return text.split() def _snake_case ( lowercase__ ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _snake_case ( ): with Pool(2 ) as pool: _lowerCamelCase : List[str] = list(iflatmap_unordered(lowercase__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(lowercase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _lowerCamelCase : List[str] = list(iflatmap_unordered(lowercase__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(lowercase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _lowerCamelCase : int = [] for yield_time, content in iflatmap_unordered( lowercase__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(lowercase__ ) assert out.count('a' ) == 2 assert out.count('b' ) == 2 assert len(lowercase__ ) == 4
355
"""Command-line script converting an original Stable Diffusion checkpoint
(`.ckpt` / `.safetensors`) into a diffusers pipeline directory.

All behaviour is driven by command-line flags; run with ``--help`` for details.
"""
import argparse

import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    # NOTE(review): the original chunk bound the parser, the parsed args and the
    # pipeline to a single reused placeholder name while reading them back as
    # `parser`, `args` and `pipe`; the conventional names are restored so the
    # script actually runs.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # Fix: `torch.floataa` does not exist — half precision is fp16.
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
12
0
"""Tokenization tests for CLIP (slow Python and fast Rust tokenizers)."""
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
    """CLIP tokenizer test-suite driven by the shared TokenizerTesterMixin harness.

    NOTE(review): identifiers in this chunk appear machine-mangled — every method is
    named ``A_`` (later defs shadow earlier ones), assignments bind the placeholder
    ``_lowerCamelCase`` while later statements read names that are never bound
    (``tokenizer_s``, ``spaces_unicodes``, ``tokens`` …), and the five class attributes
    below all reuse one name. Code is left byte-identical; flagged for restoration
    against the upstream test file.
    """

    # Suite configuration consumed by the mixin (all five share one mangled name,
    # so only the last assignment survives — see class NOTE).
    lowerCamelCase__ = CLIPTokenizer
    lowerCamelCase__ = CLIPTokenizerFast
    lowerCamelCase__ = True
    lowerCamelCase__ = {}
    lowerCamelCase__ = False

    def A_ ( self ):
        # setUp: write a tiny CLIP vocab + BPE merges file into the test tmp dir.
        super().setUp()
        # fmt: off
        _lowerCamelCase : Tuple = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        _lowerCamelCase : Any = dict(zip(lowercase , range(len(lowercase ) ) ) )
        _lowerCamelCase : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        _lowerCamelCase : List[Any] = {'unk_token': '<unk>'}
        _lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        _lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowercase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(lowercase ) )

    def A_ ( self , **lowercase ):
        # Build a slow tokenizer from the files written in setUp.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase )

    def A_ ( self , **lowercase ):
        # Build a fast tokenizer from the same files.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )

    def A_ ( self , lowercase ):
        # Input/expected-output pair used by the mixin's round-trip tests.
        _lowerCamelCase : str = 'lower newer'
        _lowerCamelCase : Tuple = 'lower newer'
        return input_text, output_text

    def A_ ( self ):
        # BPE tokenization and token→id conversion against the toy vocab.
        _lowerCamelCase : Tuple = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _lowerCamelCase : str = 'lower newer'
        _lowerCamelCase : List[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        _lowerCamelCase : List[str] = tokenizer.tokenize(lowercase )
        self.assertListEqual(lowercase , lowercase )
        _lowerCamelCase : str = tokens + [tokenizer.unk_token]
        _lowerCamelCase : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )

    @require_ftfy
    def A_ ( self ):
        # Slow (ftfy-based) and fast tokenizers must agree on tricky unicode input.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
                _lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
                _lowerCamelCase : Optional[Any] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                _lowerCamelCase : Union[str, Any] = tokenizer_s.tokenize(lowercase )
                _lowerCamelCase : Tuple = tokenizer_r.tokenize(lowercase )
                self.assertListEqual(lowercase , lowercase )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                _lowerCamelCase : Optional[int] = 'xa\u0303y' + ' ' + 'x\xe3y'
                _lowerCamelCase : Optional[int] = tokenizer_s.tokenize(lowercase )
                _lowerCamelCase : Optional[int] = tokenizer_r.tokenize(lowercase )
                self.assertListEqual(lowercase , lowercase )
                # Test that the tokenization is identical on unicode of space type
                _lowerCamelCase : str = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark)
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    _lowerCamelCase : Tuple = tokenizer_s.tokenize(lowercase )
                    _lowerCamelCase : int = tokenizer_r.tokenize(lowercase )
                    self.assertListEqual(lowercase , lowercase )
                # Test that the tokenization is identical on unicode of line break type
                _lowerCamelCase : Optional[int] = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    _lowerCamelCase : Dict = tokenizer_s.tokenize(lowercase )
                    _lowerCamelCase : List[str] = tokenizer_r.tokenize(lowercase )
                    self.assertListEqual(lowercase , lowercase )

    def A_ ( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _lowerCamelCase : List[Any] = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                _lowerCamelCase : Any = F'''{text_of_1_token} {text_of_1_token}'''
                _lowerCamelCase : List[str] = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , )
                _lowerCamelCase : List[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
                # Same check with a leading space in the text.
                _lowerCamelCase : List[Any] = F''' {text}'''
                _lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , )
                _lowerCamelCase : List[str] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , )

    def A_ ( self ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(lowercase ) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.' ) )

    @require_ftfy
    def A_ ( self ):
        # Re-run the mixin's python/rust equivalence test with ftfy available.
        super().test_tokenization_python_rust_equals()

    def A_ ( self ):
        # CLIP always lower cases letters
        pass
356
"""Tests for the diffusers UnCLIPScheduler (variance schedule and full denoising loop)."""
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCAmelCase__ ( lowercase ):
    """UnCLIPScheduler test-suite on top of the shared SchedulerCommonTest harness.

    NOTE(review): identifiers in this chunk appear machine-mangled — every method is
    named ``A_`` (later defs shadow earlier ones) and assignments bind the placeholder
    ``_lowerCamelCase`` while later statements read never-bound names (``config``,
    ``scheduler_class``, ``result_sum`` …). Loop extents below were reconstructed from
    the upstream layout — confirm. Code is otherwise left byte-identical.
    """

    # Scheduler classes exercised by the common harness.
    lowerCamelCase__ = (UnCLIPScheduler,)

    def A_ ( self , **lowercase ):
        # Base scheduler config; keyword overrides are merged on top.
        _lowerCamelCase : Any = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**lowercase )
        return config

    def A_ ( self ):
        # Sweep over training-timestep counts.
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase )

    def A_ ( self ):
        # Sweep over the two supported variance types.
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=lowercase )

    def A_ ( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=lowercase )

    def A_ ( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=lowercase )

    def A_ ( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=lowercase )

    def A_ ( self ):
        # Forward pass over (time_step, prev_timestep) pairs; prev must precede current.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )

    def A_ ( self ):
        # Golden variance values for the 'fixed_small_log' schedule.
        _lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
        _lowerCamelCase : str = scheduler_class(**lowercase )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5

    def A_ ( self ):
        # Golden variance values for the 'learned_range' schedule.
        _lowerCamelCase : List[str] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
        _lowerCamelCase : int = scheduler_class(**lowercase )
        _lowerCamelCase : List[str] = 0.5
        assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5

    def A_ ( self ):
        # Full denoising loop over the default timestep schedule; golden checksums.
        _lowerCamelCase : List[Any] = self.scheduler_classes[0]
        _lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        _lowerCamelCase : Tuple = scheduler_class(**lowercase )
        _lowerCamelCase : Union[str, Any] = scheduler.timesteps
        _lowerCamelCase : Any = self.dummy_model()
        _lowerCamelCase : Optional[Any] = self.dummy_sample_deter
        _lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        for i, t in enumerate(lowercase ):
            # 1. predict noise residual
            _lowerCamelCase : Tuple = model(lowercase , lowercase )
            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
            _lowerCamelCase : Optional[int] = pred_prev_sample
        _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
        _lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3

    def A_ ( self ):
        # Denoising loop restricted to 25 inference steps, passing prev_timestep explicitly.
        _lowerCamelCase : Tuple = self.scheduler_classes[0]
        _lowerCamelCase : str = self.get_scheduler_config()
        _lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
        scheduler.set_timesteps(25 )
        _lowerCamelCase : Optional[Any] = scheduler.timesteps
        _lowerCamelCase : Optional[int] = self.dummy_model()
        _lowerCamelCase : Any = self.dummy_sample_deter
        _lowerCamelCase : str = torch.manual_seed(0 )
        for i, t in enumerate(lowercase ):
            # 1. predict noise residual
            _lowerCamelCase : List[Any] = model(lowercase , lowercase )
            if i + 1 == timesteps.shape[0]:
                # Last step has no previous timestep.
                _lowerCamelCase : Optional[int] = None
            else:
                _lowerCamelCase : List[str] = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            _lowerCamelCase : Union[str, Any] = scheduler.step(
                lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
            _lowerCamelCase : List[Any] = pred_prev_sample
        _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
        _lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3

    def A_ ( self ):
        # Not applicable for this scheduler (intentionally skipped common test).
        pass

    def A_ ( self ):
        # Not applicable for this scheduler (intentionally skipped common test).
        pass
12
0
"""simple docstring""" import csv import tweepy # Twitter API credentials lowercase__ = """""" lowercase__ = """""" lowercase__ = """""" lowercase__ = """""" def _snake_case ( lowercase__ ): # authorize twitter, initialize tweepy _lowerCamelCase : int = tweepy.OAuthHandler(lowercase__ , lowercase__ ) auth.set_access_token(lowercase__ , lowercase__ ) _lowerCamelCase : Optional[Any] = tweepy.API(lowercase__ ) # initialize a list to hold all the tweepy Tweets _lowerCamelCase : Optional[int] = [] # make initial request for most recent tweets (200 is the maximum allowed count) _lowerCamelCase : List[str] = api.user_timeline(screen_name=lowercase__ , count=200 ) # save most recent tweets alltweets.extend(lowercase__ ) # save the id of the oldest tweet less one _lowerCamelCase : List[Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(lowercase__ ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates _lowerCamelCase : List[str] = api.user_timeline( screen_name=lowercase__ , count=200 , max_id=lowercase__ ) # save most recent tweets alltweets.extend(lowercase__ ) # update the id of the oldest tweet less one _lowerCamelCase : Optional[Any] = alltweets[-1].id - 1 print(f'''...{len(lowercase__ )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv _lowerCamelCase : Any = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' , 'w' ) as f: _lowerCamelCase : Tuple = csv.writer(lowercase__ ) writer.writerow(['id', 'created_at', 'text'] ) writer.writerows(lowercase__ ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("""FirePing32""")
357
"""Data2VecAudio model configuration."""
import math

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration class for a Data2Vec audio model.

    NOTE(review): the original chunk declared every ``__init__`` parameter under the
    single placeholder name ``lowercase`` (duplicate parameter names — a SyntaxError)
    while the body read the real names (``hidden_size``, ``conv_dim`` …); the parameter
    names below are restored from those reads. The class base was likewise the
    undefined name ``lowercase`` and is restored to the imported ``PretrainedConfig``.
    """

    # model_type identifier used by the Auto* machinery.
    lowerCamelCase__ = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder + convolutional feature-encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def A_ ( self ):
        # Overall downsampling ratio of the convolutional feature encoder.
        return math.prod(self.conv_stride)
12
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase__ = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
358
"""NLLB-200 translation tool for the transformers agents framework."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


# Plain-English language name -> NLLB-200 language code.
# NOTE(review): the original chunk bound this table to a placeholder name while the
# tool class below reads `LANGUAGE_CODES`; the matching name is restored.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng",
    "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn",
    "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn",
    "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn",
    "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn",
    "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn",
    "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva",
    "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn",
    "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn",
    "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn",
    "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn",
    "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn",
}


class lowerCAmelCase__ ( lowercase ):
    """PipelineTool that translates text between languages with NLLB-200.

    NOTE(review): the repeated ``lowerCamelCase__`` class attributes and the ``A_``
    method names follow this file's (machine-mangled) convention — see the
    SummarizationTool earlier in the corpus — and are kept for consistency.
    """

    lowerCamelCase__ = "facebook/nllb-200-distilled-600M"
    lowerCamelCase__ = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    lowerCamelCase__ = "translator"
    lowerCamelCase__ = AutoTokenizer
    lowerCamelCase__ = AutoModelForSeqaSeqLM
    # The encode step reads `self.lang_to_code`, so this attribute keeps its real name.
    lang_to_code = LANGUAGE_CODES
    lowerCamelCase__ = ["text", "text", "text"]
    lowerCamelCase__ = ["text"]

    def A_ ( self , text , src_lang , tgt_lang ):
        # Encode: validate the plain-English language names, map them to NLLB codes,
        # and build the tokenized translation inputs.
        # NOTE(review): the original declared all three parameters as `lowercase`
        # (a SyntaxError); names restored from the body's reads of src_lang/tgt_lang.
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors='pt' , src_lang=src_lang , tgt_lang=tgt_lang )

    def A_ ( self , lowercase ):
        # Forward: run generation on the encoded inputs.
        return self.model.generate(**lowercase )

    def A_ ( self , outputs ):
        # Decode: turn the generated ids back into text, dropping special tokens.
        # NOTE(review): the original read `outputs` while naming the parameter
        # `lowercase` and passed that placeholder as skip_special_tokens; restored
        # to the conventional skip_special_tokens=True — confirm against upstream.
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
0
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowercase__ = logging.get_logger(__name__) lowercase__ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) lowercase__ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", 
"""FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowercase__ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowercase__ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) lowercase__ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", 
"""FlaxViTForImageClassification"""), ] ) lowercase__ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) lowercase__ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) lowercase__ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) lowercase__ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", 
"""FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) lowercase__ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) lowercase__ = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) lowercase__ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) lowercase__ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) lowercase__ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) lowercase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowercase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowercase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowercase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowercase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_MAPPING lowercase__ = auto_class_update(FlaxAutoModel) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowercase__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowercase__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING 
lowercase__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowercase__ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase__ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowercase__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowercase__ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowercase__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowercase__ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowercase__ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowercase__ = 
auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowerCAmelCase__ ( _BaseAutoModelClass ): '''simple docstring''' lowerCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowercase__ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
359
"""Pipeline smoke tests for the video-classification task."""

import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tests ``VideoClassificationPipeline`` output shape and deterministic scores."""

    # Every architecture registered for video classification is exercised by the
    # generic pipeline test harness through this mapping.
    lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def A_ ( self , lowercase , lowercase , lowercase ):
        # Build a pipeline (top_k=2) plus two example inputs: a clip downloaded
        # from the Hub and the equivalent remote URL.
        _lowerCamelCase : Optional[int] = hf_hub_download(
            repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        _lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 )
        # NOTE(review): the list reads `example_video_filepath` and the return reads
        # `video_classifier`/`examples`, but the assignments above bind `_lowerCamelCase` —
        # mechanical renaming appears to have broken the local names; confirm against
        # the upstream file.
        _lowerCamelCase : List[str] = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples

    def A_ ( self , lowercase , lowercase ):
        # top_k=2 was requested, so each example must yield exactly two
        # {score, label} dicts with the expected value types.
        for example in examples:
            _lowerCamelCase : Tuple = video_classifier(lowercase )
            self.assertEqual(
                lowercase , [
                    {'score': ANY(lowercase ), 'label': ANY(lowercase )},
                    {'score': ANY(lowercase ), 'label': ANY(lowercase )},
                ] , )

    @require_torch
    def A_ ( self ):
        # End-to-end check against a tiny random VideoMAE checkpoint; scores are
        # deterministic for a fixed checkpoint, so exact values are asserted.
        _lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        _lowerCamelCase : Tuple = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
        _lowerCamelCase : Dict = pipeline(
            'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 )
        _lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
        _lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 )
        self.assertEqual(
            nested_simplify(lowercase , decimals=4 ) ,
            [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
        # Batched input: the same clip passed twice must produce identical scores twice.
        # NOTE(review): `video_file_path` is read here but never bound under that name
        # (see the renaming issue above).
        _lowerCamelCase : str = video_classifier(
            [
                video_file_path,
                video_file_path,
            ] , top_k=2 , )
        self.assertEqual(
            nested_simplify(lowercase , decimals=4 ) ,
            [
                [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
                [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
            ] , )

    @require_tf
    def A_ ( self ):
        # No TF video-classification models exist yet; kept as an explicit no-op.
        pass
12
0
"""Processor for MGP-STR scene-text recognition: image preprocessing plus
three-way (character / BPE / wordpiece) decoding of model logits."""

import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin

if is_torch_available():
    import torch


class lowerCAmelCase__ ( lowercase ):
    """Enum of the three decoding heads MGP-STR predicts with."""

    lowerCamelCase__ = """char"""
    lowerCamelCase__ = """bpe"""
    lowerCamelCase__ = """wp"""


# All supported decode formats, in the order the model emits them.
lowercase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class lowerCAmelCase__ ( lowercase ):
    """Wraps a ViT image processor and an MGP-STR char tokenizer; BPE (gpt2) and
    wordpiece (bert-base-uncased) tokenizers are loaded internally for decoding."""

    lowerCamelCase__ = ["""image_processor""", """char_tokenizer"""]
    lowerCamelCase__ = """ViTImageProcessor"""
    lowerCamelCase__ = """MgpstrTokenizer"""

    def __init__( self , lowercase=None , lowercase=None , **lowercase ):
        # Back-compat shim: accept the deprecated `feature_extractor` kwarg as an
        # alias for `image_processor`, with a deprecation warning.
        _lowerCamelCase : int = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , lowercase , )
            _lowerCamelCase : List[Any] = kwargs.pop('feature_extractor' )
        _lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        # The char tokenizer is the user-supplied one; BPE and wordpiece decoders
        # are fixed pretrained tokenizers used only in post-processing.
        _lowerCamelCase : Optional[int] = tokenizer
        _lowerCamelCase : Any = AutoTokenizer.from_pretrained('gpt2' )
        _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-uncased' )
        super().__init__(lowercase , lowercase )

    def __call__( self , lowercase=None , lowercase=None , lowercase=None , **lowercase ):
        # Preprocess images and/or text; when both are given, the tokenized ids are
        # attached to the image inputs under 'labels'-style merging below.
        # NOTE(review): the branches read `images`/`text`/`inputs`/`encodings`, but
        # the assignments bind `_lowerCamelCase` — mechanical renaming appears to
        # have broken the local names; confirm against the upstream file.
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            _lowerCamelCase : Dict = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
        if text is not None:
            _lowerCamelCase : Tuple = self.char_tokenizer(lowercase , return_tensors=lowercase , **lowercase )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            _lowerCamelCase : Optional[int] = encodings['input_ids']
            return inputs

    def A_ ( self , lowercase ):
        # Decode the three logit heads independently, then keep, per sample, the
        # candidate string with the highest confidence score.
        _lowerCamelCase : str = sequences
        _lowerCamelCase : Dict = char_preds.size(0 )  # batch size
        _lowerCamelCase : Union[str, Any] = self._decode_helper(lowercase , 'char' )
        _lowerCamelCase : Tuple = self._decode_helper(lowercase , 'bpe' )
        _lowerCamelCase : int = self._decode_helper(lowercase , 'wp' )
        _lowerCamelCase : Tuple = []
        _lowerCamelCase : int = []
        for i in range(lowercase ):
            # Pick whichever head is most confident for this sample.
            _lowerCamelCase : List[str] = [char_scores[i], bpe_scores[i], wp_scores[i]]
            _lowerCamelCase : Tuple = [char_strs[i], bpe_strs[i], wp_strs[i]]
            _lowerCamelCase : List[Any] = scores.index(max(lowercase ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        # Output dict keeps the winning text/scores plus every head's raw strings.
        _lowerCamelCase : str = {}
        _lowerCamelCase : Dict = final_strs
        _lowerCamelCase : Optional[Any] = final_scores
        _lowerCamelCase : int = char_strs
        _lowerCamelCase : List[Any] = bpe_strs
        _lowerCamelCase : Any = wp_strs
        return out

    def A_ ( self , lowercase , lowercase ):
        # Greedy-decode one head's logits into strings plus a confidence per sample.
        # Each head has its own decoder, EOS token id, and EOS string marker.
        if format == DecodeType.CHARACTER:
            _lowerCamelCase : Any = self.char_decode
            _lowerCamelCase : List[str] = 1  # char tokenizer EOS id
            _lowerCamelCase : Optional[Any] = '[s]'
        elif format == DecodeType.BPE:
            _lowerCamelCase : Union[str, Any] = self.bpe_decode
            _lowerCamelCase : int = 2  # gpt2 EOS id used by MGP-STR
            _lowerCamelCase : Dict = '#'
        elif format == DecodeType.WORDPIECE:
            _lowerCamelCase : List[Any] = self.wp_decode
            _lowerCamelCase : Optional[int] = 102  # bert [SEP] id
            _lowerCamelCase : Tuple = '[SEP]'
        else:
            raise ValueError(F'''Format {format} is not supported.''' )
        _lowerCamelCase : Union[str, Any] = [], []
        _lowerCamelCase : Optional[int] = pred_logits.size(0 )  # batch size
        _lowerCamelCase : Optional[int] = pred_logits.size(1 )  # max sequence length
        # Greedy argmax over the vocab; drop the leading (BOS) position.
        _lowerCamelCase : Union[str, Any] = pred_logits.topk(1 , dim=-1 , largest=lowercase , sorted=lowercase )
        _lowerCamelCase : Dict = preds_index.view(-1 , lowercase )[:, 1:]
        _lowerCamelCase : Union[str, Any] = decoder(lowercase )
        # Per-position confidence = softmax max; aligned with the BOS-stripped ids.
        _lowerCamelCase : Dict = torch.nn.functional.softmax(lowercase , dim=2 ).max(dim=2 )
        _lowerCamelCase : int = preds_max_prob[:, 1:]
        for index in range(lowercase ):
            # Truncate each string at its EOS marker and compute the cumulative
            # product of per-position confidences up to (and including) EOS.
            _lowerCamelCase : int = preds_str[index].find(lowercase )
            _lowerCamelCase : int = preds_str[index][:pred_eos]
            _lowerCamelCase : Tuple = preds_index[index].cpu().tolist()
            _lowerCamelCase : str = pred_index.index(lowercase ) if eos_token in pred_index else -1
            _lowerCamelCase : List[str] = preds_max_prob[index][: pred_eos_index + 1]
            # Empty prediction (no kept positions) gets confidence 0.0.
            _lowerCamelCase : Tuple = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(lowercase )
            conf_scores.append(lowercase )
        return dec_strs, conf_scores

    def A_ ( self , lowercase ):
        # Character-level decode; spaces inserted by the tokenizer are stripped.
        _lowerCamelCase : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(lowercase )]
        return decode_strs

    def A_ ( self , lowercase ):
        # BPE decode via the internally loaded gpt2 tokenizer.
        return self.bpe_tokenizer.batch_decode(lowercase )

    def A_ ( self , lowercase ):
        # Wordpiece decode via the internally loaded bert tokenizer; strip spaces.
        _lowerCamelCase : str = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(lowercase )]
        return decode_strs
360
"""Lazy import structure for the MEGA model (configuration always available,
modeling symbols only when torch is installed)."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Public import structure: configuration symbols are importable unconditionally.
lowercase__ = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: register the torch-only modeling symbols under their module key.
    # The previous code rebound the name to a bare list, which discarded the
    # configuration entries from the import structure.
    lowercase__["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Fix: pass the import structure built above — the previous code referenced
    # an undefined name `_import_structure`, raising NameError at import time.
    # NOTE(review): the canonical transformers pattern assigns this to
    # sys.modules[__name__]; confirm whether that replacement is intended here.
    lowercase__ = _LazyModule(__name__, globals()["__file__"], lowercase__, module_spec=__spec__)
12
0
"""simple docstring""" from PIL import Image def _snake_case ( lowercase__ , lowercase__ ): def brightness(lowercase__ ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError('level must be between -255.0 (black) and 255.0 (white)' ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 lowercase__ = change_brightness(img, 100) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
361
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): if attention_mask is None: _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = OPTConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : Tuple = pad_token_id _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : 
Optional[int] = embed_dim _lowerCamelCase : List[str] = word_embed_proj_dim _lowerCamelCase : Any = False def A_ ( self ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) _lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase ) _lowerCamelCase : Optional[Any] = inputs_dict['input_ids'] _lowerCamelCase : str = input_ids[:1, :] _lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) _lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : Optional[int] = tf.concat([attention_mask, 
next_attn_mask] , axis=-1 ) _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 10 def A_ ( self ): _lowerCamelCase : int = TFOPTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _lowerCamelCase : Optional[int] = model_class(config=lowercase ) _lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) _lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. _lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing _lowerCamelCase : int = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Optional[Any] = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) _lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Union[str, Any] = False self.assertTrue(lowercase ) def _snake_case ( lowercase__ ): return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = 99 def A_ ( self ): _lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _lowerCamelCase 
: int = input_ids.shape[0] _lowerCamelCase : List[Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' ) _lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): _lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state _lowerCamelCase : Optional[Any] = (1, 11, 512) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[str] = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) _lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): super().setUp() _lowerCamelCase : List[Any] = 'facebook/opt-350m' def A_ ( self ): _lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) _lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) _lowerCamelCase : List[str] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _lowerCamelCase : List[Any] = tokenizer(lowercase , 
return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) _lowerCamelCase : Any = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) _lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self ): _lowerCamelCase : str = 'facebook/opt-125m' _lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : int = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string 
self.assertListEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = 'facebook/opt-350m' _lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase ) _lowerCamelCase : Any = 'left' # use different length sentences to test batching _lowerCamelCase : Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] _lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) _lowerCamelCase : int = inputs['input_ids'] _lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase ) _lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) _lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) _lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def A_ ( self ): _lowerCamelCase : Tuple = 'facebook/opt-350m' _lowerCamelCase : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and 
mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
12
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json""" ), """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""", """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json""" ), """distilbert-base-uncased-finetuned-sst-2-english""": ( """https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json""" ), } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """distilbert""" lowerCamelCase__ = { """hidden_size""": """dim""", """num_attention_heads""": """n_heads""", """num_hidden_layers""": """n_layers""", } def __init__( self , lowercase=30522 , lowercase=512 , lowercase=False , lowercase=6 , lowercase=12 , lowercase=768 , lowercase=4 * 768 , lowercase=0.1 , lowercase=0.1 , lowercase="gelu" , lowercase=0.02 , lowercase=0.1 , lowercase=0.2 , lowercase=0 , **lowercase , ): _lowerCamelCase : Any = vocab_size _lowerCamelCase : int = max_position_embeddings _lowerCamelCase : Optional[int] = sinusoidal_pos_embds _lowerCamelCase : List[str] = n_layers _lowerCamelCase : Optional[Any] = n_heads _lowerCamelCase : Any = dim _lowerCamelCase : List[Any] = hidden_dim _lowerCamelCase : List[Any] 
= dropout _lowerCamelCase : Optional[int] = attention_dropout _lowerCamelCase : Tuple = activation _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = qa_dropout _lowerCamelCase : List[str] = seq_classif_dropout super().__init__(**lowercase , pad_token_id=lowercase ) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @property def A_ ( self ): if self.task == "multiple-choice": _lowerCamelCase : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCamelCase : Dict = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
362
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


class lowerCAmelCase__(PipelineTool):
    """Text summarization tool backed by a seq2seq summarization checkpoint.

    Bug fixes vs. the mangled original:
    - the base class was the undefined name ``lowercase`` (NameError at
      import); restored to ``PipelineTool``;
    - every class attribute was bound to the same name so each assignment
      clobbered the previous one; restored to the attribute names the
      ``PipelineTool`` base expects;
    - all three pipeline hooks were named ``A_`` so only the last definition
      was reachable; restored to ``encode``/``forward``/``decode``;
    - tokenizer boolean flags were the undefined name ``lowercase``;
      restored to ``True``.
    """

    default_checkpoint = 'philschmid/bart-large-cnn-samsum'
    description = (
        'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
        'and returns a summary of the text.'
    )
    name = 'summarizer'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM

    inputs = ['text']
    outputs = ['text']

    def encode(self, text):
        """Tokenize ``text`` for the model, truncating to the model max length."""
        return self.pre_processor(text, return_tensors='pt', truncation=True)

    def forward(self, inputs):
        """Run generation and return the first (only) output sequence."""
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        """Turn generated token ids back into clean, readable text."""
        return self.pre_processor.decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
12
0
"""simple docstring"""

from random import shuffle

import tensorflow as tf
from numpy import array


def _snake_case(vectors, noofclusters):
    """K-means clustering implemented with TF1-style TensorFlow graph ops.

    Bug fix: the two parameters previously shared one mangled name (a
    SyntaxError); they are restored to the names the body already uses.
    NOTE(review): this uses the legacy TF1 API (``tf.Session``, ``tf.sub``,
    ``tf.initialize_all_variables``) and will not run on TF2 — confirm the
    pinned TensorFlow version before relying on it.

    Args:
        vectors: sequence of equal-length numeric vectors to cluster.
        noofclusters: number of clusters; must be < ``len(vectors)``.

    Returns:
        Tuple ``(centroids, assignments)``: final centroid positions and,
        for each input vector, the index of the cluster it belongs to.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Dimensionality of the input vectors.
    dim = len(vectors[0])

    # Random sample of input indices seeds the initial centroids.
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # A fresh graph per call keeps repeated invocations from crowding the
    # default graph with stale ops and Variables.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        # One Variable per centroid, seeded from randomly chosen inputs.
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        # Ops that overwrite a centroid with a freshly computed location.
        centroid_value = tf.placeholder('float64', [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        # Per-vector cluster-assignment Variables (all start at cluster 0).
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        # Ops that overwrite an assignment Variable.
        assignment_value = tf.placeholder('int32')
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        # Mean over a batch of vectors (axis 0) -> new centroid location.
        mean_input = tf.placeholder('float', [None, dim])
        mean_op = tf.reduce_mean(mean_input, 0)

        # Euclidean distance between two vectors.
        va = tf.placeholder('float', [dim])
        vb = tf.placeholder('float', [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        # argmin over the centroid distances picks the nearest cluster.
        centroid_distances = tf.placeholder('float', [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        # The initializer must be created after all Variables above so that
        # every one of them is covered.
        init_op = tf.initialize_all_variables()
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Fixed number of Expectation-Maximization rounds, instead of a
        # convergence/stopping criterion, to keep things simple.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP: assign each vector to its nearest centroid.
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Distance from this vector to every current centroid.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Nearest-cluster decision, then persist it.
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP: move each centroid to the mean of its
            ##assigned vectors, minimizing within-cluster sum-of-squares.
            for cluster_n in range(noofclusters):
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Materialize final centroids and assignments.
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
363
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) ) _lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : int = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import functools def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : int = len(lowercase__ ) _lowerCamelCase : Any = len(lowercase__ ) @functools.cache def min_distance(lowercase__ , lowercase__ ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa _lowerCamelCase : int = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowercase__ ) , 1 + min_distance(lowercase__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
364
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowercase__ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) lowercase__ = [] lowercase__ = [] lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowercase__ = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", """emoji""": True, }, } ] lowercase__ = 0 for log in Path().glob("""*.log"""): lowercase__ = 0 with open(log, """r""") as f: for line in f: lowercase__ = json.loads(line) if line.get("""nodeid""", """""") != "": lowercase__ = line["""nodeid"""] if line.get("""duration""", None) is not None: lowercase__ = F"{line['duration']:.4f}" if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowercase__ = [] log.unlink() lowercase__ = """""" lowercase__ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowercase__ = [] lowercase__ = {} for test in failed_tests: lowercase__ = test[0].split("""::""") lowercase__ = data[0].split("""/""")[-1] if data[0] not in filesafailed: lowercase__ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowercase__ = [test[0] for test in failed_table] lowercase__ = list(set(files)) # Count number of instances in failed_tests lowercase__ = [] for file in 
individual_files: table.append([file, len(filesafailed[file])]) lowercase__ = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: lowercase__ = """Too many failed tests, please see the full report in the Action results.""" lowercase__ = len(err) + 10 lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}" print(F"### {message}") else: lowercase__ = """No failed tests! 🤗""" print(F"## {message}") payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 🤗": lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) lowercase__ = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) lowercase__ = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowercase__ = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowercase__ = row[0] else: lowercase__ = """""" lowercase__ = { 
"""type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
12
0
"""simple docstring""" import argparse import datetime def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } _lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month _lowerCamelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) _lowerCamelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day _lowerCamelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator _lowerCamelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year _lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation _lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) ) # Start math if m <= 2: _lowerCamelCase : str = y - 1 _lowerCamelCase : Tuple = m + 12 # maths var _lowerCamelCase : int = int(str(lowercase__ )[:2] ) _lowerCamelCase : int = int(str(lowercase__ )[2:] ) _lowerCamelCase : int = int(2.6 * m - 5.3_9 ) _lowerCamelCase : int = int(c / 4 ) _lowerCamelCase : int = int(k / 4 ) _lowerCamelCase : int = int(d + k ) _lowerCamelCase : int = int(t + u + v + x ) _lowerCamelCase : int = int(z - (2 * c) ) _lowerCamelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowercase__ = parser.parse_args() zeller(args.date_input)
365
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """AutoTokenizer""" lowerCamelCase__ = ["""tokenizer"""] lowerCamelCase__ = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , lowercase , lowercase=None ): super().__init__(lowercase ) _lowerCamelCase : Optional[int] = speaker_embeddings @classmethod def A_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: _lowerCamelCase : Optional[Any] = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(lowercase , lowercase )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) _lowerCamelCase : List[Any] = None else: with open(lowercase ) as speaker_embeddings_json: _lowerCamelCase : Union[str, Any] = json.load(lowercase ) else: _lowerCamelCase : Tuple = None _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def A_ 
( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase ) _lowerCamelCase : int = {} _lowerCamelCase : List[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase ) _lowerCamelCase : Any = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , ) _lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' ) _lowerCamelCase : Optional[Any] = tmp_dict with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def A_ ( self , lowercase = None , **lowercase ): _lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset] _lowerCamelCase : Any = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) _lowerCamelCase : Union[str, Any] = get_file_from_repo( self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if path is None: raise ValueError( 
F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) _lowerCamelCase : List[str] = np.load(lowercase ) return voice_preset_dict def A_ ( self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _lowerCamelCase : Any = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith('.npz' ): _lowerCamelCase : Optional[Any] = voice_preset + '.npz' _lowerCamelCase : Union[str, Any] = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) _lowerCamelCase : Tuple = BatchFeature(data=lowercase , tensor_type=lowercase ) _lowerCamelCase : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: _lowerCamelCase : Optional[int] = voice_preset return encoded_text
12
0
"""simple docstring"""


class lowerCAmelCase__:
    """Fixed-capacity circular (ring-buffer) queue.

    Bug fix: the four operation methods all shared the mangled name ``A_``,
    so only the last definition (dequeue) was reachable. Each now has a
    descriptive name; ``A_`` is kept as a backward-compatible alias for
    ``dequeue`` (the implementation the old name resolved to).
    """

    def __init__(self, n):
        self.n = n                    # fixed capacity
        self.array = [None] * self.n  # backing storage
        self.front = 0                # index of the first element
        self.rear = 0                 # index one past the last element
        self.size = 0                 # number of stored elements

    def __len__(self):
        return self.size

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return self.size == 0

    def first(self):
        """Return the front element, or False when the queue is empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Append ``data`` at the rear; raise when the queue is full.

        Returns ``self`` so calls can be chained.
        """
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raise when empty."""
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None  # drop the reference for GC
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp

    # Backward-compatible alias for the old mangled method name.
    A_ = dequeue
366
"""simple docstring"""

import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device

# Module flag; nothing in this chunk reads it — presumably a debug/pipeline
# switch used elsewhere. TODO confirm against the rest of the file.
lowercase__ = False


class lowerCAmelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    # Intentionally empty placeholder (fast-test variant); only the slow GPU
    # class below exercises the pipeline. NOTE(review): both classes share
    # the same mangled name, so this first one is shadowed by the second.
    pass


@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    def A_ ( self ):
        # End-to-end integration test of the image-variation pipeline:
        # loads the checkpoint, runs 50 inference steps on a sample image,
        # then checks the output shape and a 3x3 pixel slice against
        # reference values.
        # NOTE(review): the result of from_pretrained is bound to
        # `_lowerCamelCase` but used as `pipe`, and `lowercase` is undefined
        # here — this mangled block cannot run as-is; left byte-identical.
        _lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(lowercase )
        pipe.set_progress_bar_config(disable=lowercase )
        _lowerCamelCase : Tuple = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        # Fixed seed so the generated image (and the slice check) is
        # deterministic.
        _lowerCamelCase : Dict = torch.manual_seed(0 )
        _lowerCamelCase : Dict = pipe(
            image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        # 3x3 patch of the last channel near the image center.
        _lowerCamelCase : str = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        _lowerCamelCase : Any = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
12
0
"""simple docstring""" lowercase__ = { """km/h""": 1.0, """m/s""": 3.6, """mph""": 1.609344, """knot""": 1.852, } lowercase__ = { """km/h""": 1.0, """m/s""": 0.277777778, """mph""": 0.621371192, """knot""": 0.539956803, } def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if unit_to not in speed_chart or unit_from not in speed_chart_inverse: _lowerCamelCase : Optional[Any] = ( f'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n''' f'''Valid values are: {', '.join(lowercase__ )}''' ) raise ValueError(lowercase__ ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """data2vec-audio""" def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) _lowerCamelCase : str = hidden_size _lowerCamelCase : str = feat_extract_activation _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Optional[Any] = conv_bias _lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings _lowerCamelCase : List[Any] = num_conv_pos_embedding_groups _lowerCamelCase : List[Any] = conv_pos_kernel_size _lowerCamelCase : Optional[int] = len(self.conv_dim ) _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase 
: Any = intermediate_size _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Any = hidden_dropout _lowerCamelCase : Union[str, Any] = attention_dropout _lowerCamelCase : str = activation_dropout _lowerCamelCase : Any = feat_proj_dropout _lowerCamelCase : Tuple = final_dropout _lowerCamelCase : Union[str, Any] = layerdrop _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Optional[Any] = mask_time_prob _lowerCamelCase : List[Any] = mask_time_length _lowerCamelCase : List[Any] = mask_time_min_masks _lowerCamelCase : Tuple = mask_feature_prob _lowerCamelCase : Optional[Any] = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # ctc loss _lowerCamelCase : Tuple = ctc_loss_reduction _lowerCamelCase : str = ctc_zero_infinity # adapter _lowerCamelCase : Union[str, Any] = add_adapter _lowerCamelCase : List[Any] = adapter_kernel_size _lowerCamelCase : Optional[Any] = adapter_stride _lowerCamelCase : List[Any] = num_adapter_layers _lowerCamelCase : int = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. 
_lowerCamelCase : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCamelCase : List[str] = list(lowercase ) _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Any = list(lowercase ) _lowerCamelCase : Optional[Any] = xvector_output_dim @property def A_ ( self ): return math.prod(self.conv_stride )
368
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase ): _lowerCamelCase : Dict = question_encoder _lowerCamelCase : List[Any] = generator _lowerCamelCase : Optional[Any] = self.question_encoder def A_ ( self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' ) _lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase ) if config is None: _lowerCamelCase : int = RagConfig.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__( self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.question_encoder def A_ ( self ): _lowerCamelCase : Optional[Any] = 
self.generator def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , lowercase , ) if max_length is None: _lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length _lowerCamelCase : Optional[Any] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _lowerCamelCase : int = self.current_tokenizer.model_max_length _lowerCamelCase : str = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) _lowerCamelCase : int = labels['input_ids'] return model_inputs
12
0
"""simple docstring""" import webbrowser from sys import argv from urllib.parse import parse_qs, quote import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": lowercase__ = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """))) print("""Googling.....""") lowercase__ = F"https://www.google.com/search?q={query}&num=100" lowercase__ = requests.get( url, headers={"""User-Agent""": str(UserAgent().random)}, ) try: lowercase__ = ( BeautifulSoup(res.text, """html.parser""") .find("""div""", attrs={"""class""": """yuRUbf"""}) .find("""a""") .get("""href""") ) except AttributeError: lowercase__ = parse_qs( BeautifulSoup(res.text, """html.parser""") .find("""div""", attrs={"""class""": """kCrYT"""}) .find("""a""") .get("""href""") )["""url"""][0] webbrowser.open(link)
369
"""simple docstring""" def _snake_case ( lowercase__ = 10 ): if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError('Invalid input' ) _lowerCamelCase : str = 10**n _lowerCamelCase : Union[str, Any] = 28433 * (pow(2 , 7830457 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
12
0
"""simple docstring""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger(__name__) lowercase__ = """https://openaipublic.azureedge.net/jukebox/models/""" lowercase__ = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def _snake_case ( lowercase__ ): if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10: _lowerCamelCase : List[str] = key.replace('.model.1.bias' , '.conv1d_1.bias' ) elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10: _lowerCamelCase : List[Any] = key.replace('.model.1.weight' , '.conv1d_1.weight' ) elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10: _lowerCamelCase : Any = key.replace('.model.3.bias' , '.conv1d_2.bias' ) elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10: _lowerCamelCase : List[str] = key.replace('.model.3.weight' , '.conv1d_2.weight' ) if "conditioner_blocks.0." in key: _lowerCamelCase : List[str] = key.replace('conditioner_blocks.0' , 'conditioner_blocks' ) if "prime_prior" in key: _lowerCamelCase : Optional[int] = key.replace('prime_prior' , 'encoder' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: _lowerCamelCase : Optional[int] = key.replace('.emb.' , '.' ) if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('.k' , '.codebook' ) if "y_emb." in key: return key.replace('y_emb.' , 'metadata_embedding.' ) if "x_emb.emb." 
in key: _lowerCamelCase : List[str] = key.replace('0.x_emb.emb' , 'embed_tokens' ) if "prime_state_ln" in key: return key.replace('prime_state_ln' , 'encoder.final_layer_norm' ) if ".ln" in key: return key.replace('.ln' , '.layer_norm' ) if "_ln" in key: return key.replace('_ln' , '_layer_norm' ) if "prime_state_proj" in key: return key.replace('prime_state_proj' , 'encoder.proj_in' ) if "prime_x_out" in key: return key.replace('prime_x_out' , 'encoder.lm_head' ) if "prior.x_out" in key: return key.replace('x_out' , 'fc_proj_out' ) if "x_emb" in key: return key.replace('x_emb' , 'embed_tokens' ) return key def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Optional[Any] = {} import re _lowerCamelCase : Any = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) _lowerCamelCase : Union[str, Any] = re.compile( r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase : Dict = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase : Dict = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' ) _lowerCamelCase : Tuple = re.compile( r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase : str = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase : Optional[Any] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' ) _lowerCamelCase : Tuple = re.compile( r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' ) _lowerCamelCase : List[Any] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(lowercase__ ): _lowerCamelCase : Optional[int] = re_encoder_block_conv_in.match(lowercase__ ) _lowerCamelCase : List[str] = 
regex_match.groups() _lowerCamelCase : List[str] = int(groups[2] ) * 2 + int(groups[3] ) _lowerCamelCase : List[Any] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' _lowerCamelCase : Optional[int] = re_encoder_block_conv_in.sub(lowercase__ , lowercase__ ) elif re_encoder_block_resnet.fullmatch(lowercase__ ): _lowerCamelCase : Optional[int] = re_encoder_block_resnet.match(lowercase__ ) _lowerCamelCase : Union[str, Any] = regex_match.groups() _lowerCamelCase : List[str] = int(groups[2] ) * 2 + int(groups[3] ) _lowerCamelCase : Dict = {'1': 1, '3': 2}[groups[-2]] _lowerCamelCase : str = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' _lowerCamelCase : int = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' _lowerCamelCase : Union[str, Any] = prefix + resnet_block _lowerCamelCase : List[str] = re_encoder_block_resnet.sub(lowercase__ , lowercase__ ) elif re_encoder_block_proj_out.fullmatch(lowercase__ ): _lowerCamelCase : Tuple = re_encoder_block_proj_out.match(lowercase__ ) _lowerCamelCase : int = regex_match.groups() _lowerCamelCase : int = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' _lowerCamelCase : int = re_encoder_block_proj_out.sub(lowercase__ , lowercase__ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(lowercase__ ): _lowerCamelCase : Tuple = re_decoder_block_conv_out.match(lowercase__ ) _lowerCamelCase : str = regex_match.groups() _lowerCamelCase : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCamelCase : Dict = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' _lowerCamelCase : Optional[Any] = re_decoder_block_conv_out.sub(lowercase__ , lowercase__ ) elif re_decoder_block_resnet.fullmatch(lowercase__ ): _lowerCamelCase : int = re_decoder_block_resnet.match(lowercase__ ) _lowerCamelCase : Optional[Any] = regex_match.groups() _lowerCamelCase : 
Optional[int] = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCamelCase : Union[str, Any] = {'1': 1, '3': 2}[groups[-2]] _lowerCamelCase : int = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' _lowerCamelCase : Tuple = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' _lowerCamelCase : Tuple = prefix + resnet_block _lowerCamelCase : int = re_decoder_block_resnet.sub(lowercase__ , lowercase__ ) elif re_decoder_block_proj_in.fullmatch(lowercase__ ): _lowerCamelCase : Optional[Any] = re_decoder_block_proj_in.match(lowercase__ ) _lowerCamelCase : Optional[Any] = regex_match.groups() _lowerCamelCase : str = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' _lowerCamelCase : Optional[Any] = re_decoder_block_proj_in.sub(lowercase__ , lowercase__ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(lowercase__ ): _lowerCamelCase : List[str] = re_prior_cond_conv_out.match(lowercase__ ) _lowerCamelCase : Union[str, Any] = regex_match.groups() _lowerCamelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCamelCase : List[str] = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' _lowerCamelCase : int = re_prior_cond_conv_out.sub(lowercase__ , lowercase__ ) elif re_prior_cond_resnet.fullmatch(lowercase__ ): _lowerCamelCase : Union[str, Any] = re_prior_cond_resnet.match(lowercase__ ) _lowerCamelCase : Optional[int] = regex_match.groups() _lowerCamelCase : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCamelCase : List[Any] = {'1': 1, '3': 2}[groups[-2]] _lowerCamelCase : Any = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' _lowerCamelCase : List[Any] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' _lowerCamelCase : List[str] = prefix + resnet_block _lowerCamelCase : Union[str, Any] = re_prior_cond_resnet.sub(lowercase__ , lowercase__ ) elif 
re_prior_cond_proj_in.fullmatch(lowercase__ ): _lowerCamelCase : Optional[int] = re_prior_cond_proj_in.match(lowercase__ ) _lowerCamelCase : List[Any] = regex_match.groups() _lowerCamelCase : Dict = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' _lowerCamelCase : int = re_prior_cond_proj_in.sub(lowercase__ , lowercase__ ) # keep original key else: _lowerCamelCase : int = original_key _lowerCamelCase : Tuple = replace_key(lowercase__ ) if f'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(f'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape: _lowerCamelCase : Optional[Any] = model_state_dict[f'''{key_prefix}.{key}'''] print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) _lowerCamelCase : int = original_key _lowerCamelCase : List[Any] = original_key _lowerCamelCase : int = value return new_dict @torch.no_grad() def _snake_case ( lowercase__=None , lowercase__=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): _lowerCamelCase : List[str] = requests.get(f'''{PREFIX}{file}''' , allow_redirects=lowercase__ ) os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=lowercase__ ) open(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , 'wb' ).write(r.content ) _lowerCamelCase : int = MODEL_MAPPING[model_name.split('/' )[-1]] _lowerCamelCase : List[str] = JukeboxConfig.from_pretrained(lowercase__ ) _lowerCamelCase : int = JukeboxModel(lowercase__ ) _lowerCamelCase : Any = [] _lowerCamelCase : Dict = {} for i, dict_name in enumerate(lowercase__ ): _lowerCamelCase : int = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['model'] _lowerCamelCase : List[Any] = {} for k in old_dic.keys(): if k.endswith('.b' ): _lowerCamelCase : Dict = old_dic[k] elif k.endswith('.w' ): _lowerCamelCase : 
Optional[int] = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: _lowerCamelCase : str = old_dic[k] else: _lowerCamelCase : Tuple = old_dic[k] _lowerCamelCase : Union[str, Any] = 'vqvae' if i == 0 else f'''priors.{3 - i}''' _lowerCamelCase : List[str] = fix_jukebox_keys(lowercase__ , model.state_dict() , lowercase__ , lowercase__ ) weight_dict.append(lowercase__ ) _lowerCamelCase : Union[str, Any] = weight_dict.pop(0 ) model.vqvae.load_state_dict(lowercase__ ) for i in range(len(lowercase__ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) with open(f'''{pytorch_dump_folder_path}/mapping.json''' , 'w' ) as txtfile: json.dump(lowercase__ , lowercase__ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase__ ) return weight_dict if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) lowercase__ = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
370
"""simple docstring""" import argparse import datetime def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } _lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month _lowerCamelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) _lowerCamelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day _lowerCamelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator _lowerCamelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year _lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation _lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) ) # Start math if m <= 2: _lowerCamelCase : str = y - 1 _lowerCamelCase : Tuple = m + 12 # maths var _lowerCamelCase : int = int(str(lowercase__ )[:2] ) _lowerCamelCase : int = int(str(lowercase__ )[2:] ) _lowerCamelCase : int = int(2.6 * m - 5.3_9 ) _lowerCamelCase : int = int(c / 4 ) _lowerCamelCase : int = int(k / 4 ) _lowerCamelCase : int = int(d + k ) _lowerCamelCase : int = int(t + u + v + x ) _lowerCamelCase : int = int(z - (2 * c) ) _lowerCamelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowercase__ = parser.parse_args() zeller(args.date_input)
12
0
# Placeholder ("dummy") objects for environments without the `sentencepiece`
# backend: instantiating the class raises a helpful ImportError-style message
# via `requires_backends`.
#
# Fixes applied in review: the original file (re)defined this exact class ~32
# times under the same obfuscation-collapsed name `lowerCAmelCase__` — only the
# last binding survives, so the duplicates were dead code and are removed.  It
# also used `metaclass=lowercase` (undefined; the imported `DummyObject` was
# unused) and duplicate `*lowercase, **lowercase` parameter names (SyntaxError).
# NOTE(review): upstream, each copy had a distinct sentencepiece-backed
# tokenizer name — restore the real class names if the originals are recovered.
from ..utils import DummyObject, requires_backends


class lowerCAmelCase__ ( metaclass=DummyObject ):
    '''Placeholder raising a "sentencepiece required" error on instantiation.'''

    # Backend list read by DummyObject / requires_backends.
    _backends = ["""sentencepiece"""]

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['sentencepiece'] )
371
"""simple docstring""" import re def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' ) if match := re.search(lowercase__ , lowercase__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
12
0
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowercase__ = 4 lowercase__ = 3 class lowerCAmelCase__ ( lowercase ): '''simple docstring''' pass def _snake_case ( lowercase__ ): for shard in shards: for i in range(lowercase__ ): yield {"i": i, "shard": shard} def _snake_case ( ): _lowerCamelCase : str = int(os.environ['RANK'] ) _lowerCamelCase : Tuple = int(os.environ['WORLD_SIZE'] ) _lowerCamelCase : List[str] = ArgumentParser() parser.add_argument('--streaming' , type=lowercase__ ) parser.add_argument('--local_rank' , type=lowercase__ ) parser.add_argument('--num_workers' , type=lowercase__ , default=0 ) _lowerCamelCase : str = parser.parse_args() _lowerCamelCase : Any = args.streaming _lowerCamelCase : Tuple = args.num_workers _lowerCamelCase : Dict = {'shards': [f'''shard_{shard_idx}''' for shard_idx in range(lowercase__ )]} _lowerCamelCase : Dict = IterableDataset.from_generator(lowercase__ , gen_kwargs=lowercase__ ) if not streaming: _lowerCamelCase : Tuple = Dataset.from_list(list(lowercase__ ) ) _lowerCamelCase : str = split_dataset_by_node(lowercase__ , rank=lowercase__ , world_size=lowercase__ ) _lowerCamelCase : Optional[Any] = torch.utils.data.DataLoader(lowercase__ , num_workers=lowercase__ ) _lowerCamelCase : Union[str, Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD _lowerCamelCase : List[str] = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) _lowerCamelCase : Optional[Any] = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' ) if __name__ == "__main__": main()
350
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowercase__ = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} lowercase__ = """zero2""" lowercase__ = """zero3""" lowercase__ = [ZEROa, ZEROa] def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param _lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test lowercase__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu 
@parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) def A_ ( self , lowercase ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = models[model] _lowerCamelCase : Optional[int] = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , ) self.do_checks(lowercase ) return output_dir def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase ) _lowerCamelCase : Any = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['--fp16'] ) # currently 
ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _lowerCamelCase : Dict = self.get_launcher(lowercase ) _lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase , env=self.get_env() ) return output_dir def A_ ( self , lowercase=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
12
0
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json""" ), } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """xlm-prophetnet""" lowerCamelCase__ = ["""past_key_values"""] lowerCamelCase__ = { """num_attention_heads""": """num_encoder_attention_heads""", } def __init__( self , lowercase = 0.1 , lowercase = "gelu" , lowercase = 30522 , lowercase = 1024 , lowercase = 4096 , lowercase = 12 , lowercase = 16 , lowercase = 4096 , lowercase = 12 , lowercase = 16 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 512 , lowercase = 0.02 , lowercase = True , lowercase = True , lowercase = 0 , lowercase = 2 , lowercase = 32 , lowercase = 128 , lowercase = False , lowercase = 0.0 , lowercase = True , lowercase = 0 , lowercase = 1 , lowercase = 2 , **lowercase , ): _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Optional[int] = encoder_ffn_dim _lowerCamelCase : Union[str, Any] = num_encoder_layers _lowerCamelCase : Union[str, Any] = num_encoder_attention_heads _lowerCamelCase : Optional[int] = decoder_ffn_dim _lowerCamelCase : int = num_decoder_layers _lowerCamelCase : Optional[Any] = num_decoder_attention_heads _lowerCamelCase : str = max_position_embeddings _lowerCamelCase : Optional[Any] = init_std # Normal(0, this parameter) _lowerCamelCase : int = activation_function # parameters for xlmprophetnet _lowerCamelCase : Tuple = ngram _lowerCamelCase : Optional[Any] = num_buckets _lowerCamelCase : Optional[int] = relative_max_distance _lowerCamelCase : List[str] = disable_ngram_loss _lowerCamelCase : Tuple = eps # 3 Types of Dropout _lowerCamelCase : str = attention_dropout _lowerCamelCase : List[Any] = 
activation_dropout _lowerCamelCase : Any = dropout _lowerCamelCase : List[str] = use_cache super().__init__( pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , add_cross_attention=lowercase , decoder_start_token_id=lowercase , **lowercase , ) @property def A_ ( self ): return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def A_ ( self , lowercase ): raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and' ' `num_decoder_layers`.' )
351
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : Optional[Any] = do_rescale _lowerCamelCase : Union[str, Any] = rescale_factor _lowerCamelCase : Any = do_pad _lowerCamelCase : Optional[int] = pad_size def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = None ): _lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase ) _lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height _lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase ) def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): _lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad _lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size _lowerCamelCase : Dict = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise 
ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. _lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images] if do_rescale: _lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_pad: _lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images] _lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] _lowerCamelCase : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase )
12
0
"""GLUE evaluation metric: accuracy, F1, Pearson/Spearman, Matthews correlation.

NOTE(review): the original chunk's mangling gave each helper duplicate
`lowercase__` parameters (a SyntaxError), left the docstring constants the
decorator reads (`_DESCRIPTION`, `_KWARGS_DESCRIPTION`) unbound, and imported
the nonexistent sklearn name `fa_score`; the canonical `f1_score`-based names
called from `_compute` are restored throughout.
"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets


_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_KWARGS_DESCRIPTION = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "pearson": Pearson Correlation
    "spearmanr": Spearman Correlation
    "matthews_correlation": Matthew Correlation
Examples:

    >>> glue_metric = datasets.load_metric('glue', 'sst2')  # 'sst2' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'mrpc')  # 'mrpc' or 'qqp'
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'stsb')
    >>> references = [0., 1., 2., 3., 4., 5.]
    >>> predictions = [0., 1., 2., 3., 4., 5.]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
    {'pearson': 1.0, 'spearmanr': 1.0}

    >>> glue_metric = datasets.load_metric('glue', 'cola')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to their labels (array-likes supporting `==` and `.mean()`)."""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    """Accuracy plus F1, for the mrpc/qqp subsets."""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlations, for the stsb regression subset."""
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    """Dispatches to the scorer appropriate for `self.config_name`."""

    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # stsb is a regression task, everything else uses integer labels.
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
352
"""simple docstring""" import os import string import sys lowercase__ = 1 << 8 lowercase__ = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } lowercase__ = KEYMAP["""up"""] lowercase__ = KEYMAP["""left"""] if sys.platform == "win32": lowercase__ = [] lowercase__ = { B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): lowercase__ = ord(str(i)) def _snake_case ( ): if os.name == "nt": import msvcrt _lowerCamelCase : Any = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowercase__ ) == 0: # Read the keystroke _lowerCamelCase : str = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCamelCase : List[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(lowercase__ ) if ord(lowercase__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _lowerCamelCase : List[Any] = chr(KEYMAP['esc'] ) except KeyError: _lowerCamelCase : int = cha[1] else: _lowerCamelCase : Optional[int] = ch.decode(lowercase__ ) else: _lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": 
import termios import tty _lowerCamelCase : List[str] = sys.stdin.fileno() _lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ ) try: tty.setraw(lowercase__ ) _lowerCamelCase : Optional[Any] = sys.stdin.read(1 ) finally: termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ ) return ch def _snake_case ( ): _lowerCamelCase : int = get_raw_chars() if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowercase__ ) == KEYMAP["esc"]: _lowerCamelCase : Union[str, Any] = get_raw_chars() if ord(lowercase__ ) == KEYMAP["mod_int"]: _lowerCamelCase : List[Any] = get_raw_chars() if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowercase__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
12
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : Tuple = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def A_ ( self ): _lowerCamelCase : Optional[int] = self.dummy_uncond_unet _lowerCamelCase : Union[str, Any] = KarrasVeScheduler() _lowerCamelCase : Any = KarrasVePipeline(unet=lowercase , scheduler=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : str = torch.manual_seed(0 ) _lowerCamelCase : int = pipe(num_inference_steps=2 , generator=lowercase , output_type='numpy' ).images _lowerCamelCase : int = torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase , output_type='numpy' , return_dict=lowercase )[0] _lowerCamelCase : List[str] = image[0, -3:, -3:, -1] _lowerCamelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCamelCase : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Tuple = 'google/ncsnpp-celebahq-256' _lowerCamelCase : List[Any] = UNetaDModel.from_pretrained(lowercase ) _lowerCamelCase : List[Any] = KarrasVeScheduler() _lowerCamelCase : Tuple = KarrasVePipeline(unet=lowercase , 
scheduler=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Any = torch.manual_seed(0 ) _lowerCamelCase : int = pipe(num_inference_steps=20 , generator=lowercase , output_type='numpy' ).images _lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _lowerCamelCase : Any = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
353
"""simple docstring""" from typing import Any def _snake_case ( lowercase__ ): if not input_list: return [] _lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list] _lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): # Initialise PyTorch model _lowerCamelCase : Optional[Any] = RemBertConfig.from_json_file(lowercase__ ) print('Building PyTorch model from configuration: {}'.format(str(lowercase__ ) ) ) _lowerCamelCase : List[str] = RemBertModel(lowercase__ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print('Save PyTorch model to {}'.format(lowercase__ ) ) torch.save(model.state_dict() , lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--rembert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained RemBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowercase__ = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
354
"""simple docstring""" def _snake_case ( lowercase__ ): # if the collection is empty, returns empty if collection == []: return [] # get some information about the collection _lowerCamelCase : List[str] = len(lowercase__ ) _lowerCamelCase : List[str] = max(lowercase__ ) _lowerCamelCase : List[str] = min(lowercase__ ) # create the counting array _lowerCamelCase : List[Any] = coll_max + 1 - coll_min _lowerCamelCase : List[Any] = [0] * counting_arr_length # count how much a number appears in the collection for number in collection: counting_arr[number - coll_min] += 1 # sum each position with it's predecessors. now, counting_arr[i] tells # us how many elements <= i has in the collection for i in range(1 , lowercase__ ): _lowerCamelCase : Optional[int] = counting_arr[i] + counting_arr[i - 1] # create the output collection _lowerCamelCase : Dict = [0] * coll_len # place the elements in the output, respecting the original order (stable # sort) from end to begin, updating counting_arr for i in reversed(range(0 , lowercase__ ) ): _lowerCamelCase : Any = collection[i] counting_arr[collection[i] - coll_min] -= 1 return ordered def _snake_case ( lowercase__ ): return "".join([chr(lowercase__ ) for i in counting_sort([ord(lowercase__ ) for c in string] )] ) if __name__ == "__main__": # Test string sort assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt" lowercase__ = input("""Enter numbers separated by a comma:\n""").strip() lowercase__ = [int(item) for item in user_input.split(""",""")] print(counting_sort(unsorted))
12
0
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def _snake_case ( lowercase__ , lowercase__ ): if inductance <= 0: raise ValueError('Inductance cannot be 0 or negative' ) elif capacitance <= 0: raise ValueError('Capacitance cannot be 0 or negative' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
355
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. 
EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) lowercase__ = parser.parse_args() lowercase__ = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
12
0
"""simple docstring""" import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__=5 ): # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count('<mask>' ) == 1 _lowerCamelCase : Any = torch.tensor(tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ).unsqueeze(0 ) # Batch size 1 _lowerCamelCase : List[str] = model(lowercase__ )[0] # The last hidden-state is the first element of the output tuple _lowerCamelCase : List[Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() _lowerCamelCase : List[Any] = logits[0, masked_index, :] _lowerCamelCase : Optional[int] = logits.softmax(dim=0 ) _lowerCamelCase : Tuple = prob.topk(k=lowercase__ , dim=0 ) _lowerCamelCase : Union[str, Any] = ' '.join( [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowercase__ ) )] ) _lowerCamelCase : Any = tokenizer.mask_token _lowerCamelCase : List[str] = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ): _lowerCamelCase : str = predicted_token_bpe.replace('\u2581' , ' ' ) if " {0}".format(lowercase__ ) in masked_input: topk_filled_outputs.append( ( masked_input.replace(' {0}'.format(lowercase__ ) , lowercase__ ), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(lowercase__ , lowercase__ ), values[index].item(), predicted_token, ) ) return topk_filled_outputs lowercase__ = CamembertTokenizer.from_pretrained("""camembert-base""") lowercase__ = CamembertForMaskedLM.from_pretrained("""camembert-base""") model.eval() lowercase__ = """Le camembert est <mask> :)""" print(fill_mask(masked_input, model, tokenizer, topk=3))
356
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = (UnCLIPScheduler,) def A_ ( self , **lowercase ): _lowerCamelCase : Any = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**lowercase ) return config def A_ ( self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=lowercase ) def A_ ( self ): for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=lowercase ) def A_ ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase ) def A_ ( self ): for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=lowercase ) def A_ ( self ): for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=lowercase ) def A_ ( self ): for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=lowercase , prev_timestep=lowercase ) def A_ ( self ): _lowerCamelCase : Optional[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' ) _lowerCamelCase : str = scheduler_class(**lowercase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5 def A_ ( self ): _lowerCamelCase : List[str] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' ) _lowerCamelCase : int = scheduler_class(**lowercase ) _lowerCamelCase : List[str] = 0.5 assert 
scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5 def A_ ( self ): _lowerCamelCase : List[Any] = self.scheduler_classes[0] _lowerCamelCase : Optional[Any] = self.get_scheduler_config() _lowerCamelCase : Tuple = scheduler_class(**lowercase ) _lowerCamelCase : Union[str, Any] = scheduler.timesteps _lowerCamelCase : Any = self.dummy_model() _lowerCamelCase : Optional[Any] = self.dummy_sample_deter _lowerCamelCase : Optional[int] = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : Tuple = model(lowercase , lowercase ) # 2. predict previous mean of sample x_t-1 _lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample _lowerCamelCase : Optional[int] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2 assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3 def A_ ( self ): _lowerCamelCase : Tuple = self.scheduler_classes[0] _lowerCamelCase : str = self.get_scheduler_config() _lowerCamelCase : Optional[Any] = scheduler_class(**lowercase ) scheduler.set_timesteps(25 ) _lowerCamelCase : Optional[Any] = scheduler.timesteps _lowerCamelCase : Optional[int] = self.dummy_model() _lowerCamelCase : Any = self.dummy_sample_deter _lowerCamelCase : str = torch.manual_seed(0 ) for i, t in enumerate(lowercase ): # 1. predict noise residual _lowerCamelCase : List[Any] = model(lowercase , lowercase ) if i + 1 == timesteps.shape[0]: _lowerCamelCase : Optional[int] = None else: _lowerCamelCase : List[str] = timesteps[i + 1] # 2. 
predict previous mean of sample x_t-1 _lowerCamelCase : Union[str, Any] = scheduler.step( lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample _lowerCamelCase : List[Any] = pred_prev_sample _lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) ) _lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) ) assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2 assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3 def A_ ( self ): pass def A_ ( self ): pass
12
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowercase__ = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
357
"""simple docstring""" import math from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """data2vec-audio""" def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) _lowerCamelCase : str = hidden_size _lowerCamelCase : str = feat_extract_activation _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Dict = list(lowercase ) _lowerCamelCase : Optional[Any] = conv_bias _lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings _lowerCamelCase : List[Any] = num_conv_pos_embedding_groups _lowerCamelCase : List[Any] = conv_pos_kernel_size _lowerCamelCase : Optional[int] = len(self.conv_dim ) _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase 
: Any = intermediate_size _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Tuple = num_attention_heads _lowerCamelCase : Any = hidden_dropout _lowerCamelCase : Union[str, Any] = attention_dropout _lowerCamelCase : str = activation_dropout _lowerCamelCase : Any = feat_proj_dropout _lowerCamelCase : Tuple = final_dropout _lowerCamelCase : Union[str, Any] = layerdrop _lowerCamelCase : List[Any] = layer_norm_eps _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Tuple = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Optional[Any] = mask_time_prob _lowerCamelCase : List[Any] = mask_time_length _lowerCamelCase : List[Any] = mask_time_min_masks _lowerCamelCase : Tuple = mask_feature_prob _lowerCamelCase : Optional[Any] = mask_feature_length _lowerCamelCase : Dict = mask_feature_min_masks # ctc loss _lowerCamelCase : Tuple = ctc_loss_reduction _lowerCamelCase : str = ctc_zero_infinity # adapter _lowerCamelCase : Union[str, Any] = add_adapter _lowerCamelCase : List[Any] = adapter_kernel_size _lowerCamelCase : Optional[Any] = adapter_stride _lowerCamelCase : List[Any] = num_adapter_layers _lowerCamelCase : int = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. 
_lowerCamelCase : Optional[int] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCamelCase : List[str] = list(lowercase ) _lowerCamelCase : Optional[Any] = list(lowercase ) _lowerCamelCase : Any = list(lowercase ) _lowerCamelCase : Optional[Any] = xvector_output_dim @property def A_ ( self ): return math.prod(self.conv_stride )
12
0
"""simple docstring""" import os import string import sys lowercase__ = 1 << 8 lowercase__ = { """tab""": ord("""\t"""), """newline""": ord("""\r"""), """esc""": 27, """up""": 65 + ARROW_KEY_FLAG, """down""": 66 + ARROW_KEY_FLAG, """right""": 67 + ARROW_KEY_FLAG, """left""": 68 + ARROW_KEY_FLAG, """mod_int""": 91, """undefined""": sys.maxsize, """interrupt""": 3, """insert""": 50, """delete""": 51, """pg_up""": 53, """pg_down""": 54, } lowercase__ = KEYMAP["""up"""] lowercase__ = KEYMAP["""left"""] if sys.platform == "win32": lowercase__ = [] lowercase__ = { B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG, B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG, B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG, B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG, } for i in range(10): lowercase__ = ord(str(i)) def _snake_case ( ): if os.name == "nt": import msvcrt _lowerCamelCase : Any = 'mbcs' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(lowercase__ ) == 0: # Read the keystroke _lowerCamelCase : str = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): _lowerCamelCase : List[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: _lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) ) WIN_CH_BUFFER.append(lowercase__ ) if ord(lowercase__ ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) _lowerCamelCase : List[Any] = chr(KEYMAP['esc'] ) except KeyError: _lowerCamelCase : int = cha[1] else: _lowerCamelCase : Optional[int] = ch.decode(lowercase__ ) else: _lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": 
import termios import tty _lowerCamelCase : List[str] = sys.stdin.fileno() _lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ ) try: tty.setraw(lowercase__ ) _lowerCamelCase : Optional[Any] = sys.stdin.read(1 ) finally: termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ ) return ch def _snake_case ( ): _lowerCamelCase : int = get_raw_chars() if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(lowercase__ ) == KEYMAP["esc"]: _lowerCamelCase : Union[str, Any] = get_raw_chars() if ord(lowercase__ ) == KEYMAP["mod_int"]: _lowerCamelCase : List[Any] = get_raw_chars() if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(lowercase__ ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
358
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool lowercase__ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", 
"""Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": 
"""kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": 
"""slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """facebook/nllb-200-distilled-600M""" lowerCamelCase__ = ( """This is a tool that translates text from a language to another. 
It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`.""" ) lowerCamelCase__ = """translator""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = LANGUAGE_CODES lowerCamelCase__ = ["""text""", """text""", """text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase , lowercase , lowercase ): if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) _lowerCamelCase : str = self.lang_to_code[src_lang] _lowerCamelCase : int = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase ) def A_ ( self , lowercase ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase )
12
0
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer lowercase__ = logging.get_logger(__name__) lowercase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase__ = { """vocab_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""", }, """merges_file""": { """Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""", }, """tokenizer_file""": { """Salesforce/codegen-350M-mono""": ( """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json""" ), }, } lowercase__ = { """Salesforce/codegen-350M-mono""": 2048, } class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = ["""input_ids""", """attention_mask"""] lowerCamelCase__ = CodeGenTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase=False , **lowercase , ): super().__init__( lowercase , lowercase , tokenizer_file=lowercase , unk_token=lowercase , bos_token=lowercase , eos_token=lowercase , add_prefix_space=lowercase , **lowercase , ) if kwargs.pop('add_bos_token' , lowercase ): _lowerCamelCase : str = kwargs.pop('name_or_path' , '' ) raise ValueError( 'Currenty GPT2\'s fast tokenizer does NOT 
support adding a BOS token.' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n''' F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n''' 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.' ) _lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , lowercase ) != add_prefix_space: _lowerCamelCase : Dict = getattr(lowercase , pre_tok_state.pop('type' ) ) _lowerCamelCase : Any = add_prefix_space _lowerCamelCase : List[Any] = pre_tok_class(**lowercase ) _lowerCamelCase : str = add_prefix_space def A_ ( self , *lowercase , **lowercase ): _lowerCamelCase : Any = kwargs.get('is_split_into_words' , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): _lowerCamelCase : List[str] = kwargs.get('is_split_into_words' , lowercase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowercase , **lowercase ) def A_ ( self , lowercase , lowercase = None ): _lowerCamelCase : Optional[Any] = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase ) def A_ ( self , lowercase , lowercase = False , lowercase = None , lowercase = None , **lowercase , ): _lowerCamelCase : List[Any] = super().decode( token_ids=lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase , **lowercase , ) if truncate_before_pattern is not None and len(lowercase ) > 0: _lowerCamelCase : Dict = self.truncate(lowercase , lowercase ) return decoded_text def A_ ( self , lowercase , lowercase ): def find_re(lowercase , lowercase , lowercase ): _lowerCamelCase : List[str] = pattern.search(lowercase , lowercase ) return m.start() if m else -1 _lowerCamelCase : Dict = [re.compile(lowercase , re.MULTILINE ) for pattern in truncate_before_pattern] _lowerCamelCase : Tuple = list(re.finditer('^print' , lowercase , re.MULTILINE ) ) if len(lowercase ) > 1: _lowerCamelCase : Dict = completion[: prints[1].start()] _lowerCamelCase : Dict = list(re.finditer('^def' , lowercase , re.MULTILINE ) ) if len(lowercase ) > 1: _lowerCamelCase : Tuple = completion[: defs[1].start()] _lowerCamelCase : List[str] = 0 _lowerCamelCase : List[Any] = [ pos for pos in [find_re(lowercase , lowercase , lowercase ) for terminal in terminals] if pos != -1 ] if len(lowercase ) > 0: return completion[: min(lowercase )] else: return completion
359
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Optional[int] = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 ) _lowerCamelCase : List[str] = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def A_ ( self , lowercase , lowercase ): for example in examples: _lowerCamelCase : Tuple = video_classifier(lowercase ) self.assertEqual( lowercase , [ {'score': ANY(lowercase ), 'label': ANY(lowercase )}, {'score': ANY(lowercase ), 'label': ANY(lowercase )}, ] , ) @require_torch def A_ ( self ): _lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' _lowerCamelCase : Tuple = VideoMAEFeatureExtractor( size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} ) _lowerCamelCase : Dict = pipeline( 'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 ) _lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , 
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , ) _lowerCamelCase : str = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(lowercase , decimals=4 ) , [ [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], ] , ) @require_tf def A_ ( self ): pass
12
0
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _snake_case ( ): _lowerCamelCase : Optional[int] = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' _lowerCamelCase : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' ) return image def _snake_case ( lowercase__ ): _lowerCamelCase : str = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) 
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : int = dct.pop(lowercase__ ) _lowerCamelCase : Optional[int] = val def _snake_case ( lowercase__ , lowercase__ ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _lowerCamelCase : Optional[int] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) _lowerCamelCase : int = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # 
next, set bias in the state dict _lowerCamelCase : Tuple = torch.cat((q_bias, torch.zeros_like(lowercase__ , requires_grad=lowercase__ ), v_bias) ) _lowerCamelCase : List[Any] = qkv_bias def _snake_case ( lowercase__ ): _lowerCamelCase : List[str] = 364 if 'coco' in model_name else 224 _lowerCamelCase : int = InstructBlipVisionConfig(image_size=lowercase__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: _lowerCamelCase : Union[str, Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _lowerCamelCase : Dict = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: _lowerCamelCase : str = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=32001 ).to_dict() elif "vicuna-13b" in model_name: _lowerCamelCase : str = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=32001 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 _lowerCamelCase : Optional[int] = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict() _lowerCamelCase : Optional[int] = InstructBlipConfig(vision_config=lowercase__ , text_config=lowercase__ , qformer_config=lowercase__ ) return config, image_size @torch.no_grad() def _snake_case ( lowercase__ , lowercase__=None , lowercase__=False ): _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: _lowerCamelCase : List[Any] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the 
original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) _lowerCamelCase : int = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) _lowerCamelCase : Any = get_blipa_config(lowercase__ ) _lowerCamelCase : Dict = InstructBlipForConditionalGeneration(lowercase__ ).eval() _lowerCamelCase : Optional[Any] = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } _lowerCamelCase : int = model_name_to_original[model_name] # load original model print('Loading original model...' ) _lowerCamelCase : Dict = 'cuda:1' if torch.cuda.is_available() else 'cpu' _lowerCamelCase : int = 'cuda:2' if torch.cuda.is_available() else 'cpu' _lowerCamelCase : Union[str, Any] = load_model_and_preprocess( name=lowercase__ , model_type=lowercase__ , is_eval=lowercase__ , device=lowercase__ ) original_model.eval() print('Done!' 
) # update state dict keys _lowerCamelCase : List[Any] = original_model.state_dict() _lowerCamelCase : str = create_rename_keys(lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _lowerCamelCase : List[str] = state_dict.pop(lowercase__ ) if key.startswith('Qformer.bert' ): _lowerCamelCase : Optional[Any] = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: _lowerCamelCase : Optional[int] = key.replace('self' , 'attention' ) if "llm_proj" in key: _lowerCamelCase : Dict = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: _lowerCamelCase : List[Any] = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): _lowerCamelCase : int = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): _lowerCamelCase : Optional[Any] = key.replace('t5' , 'language' ) _lowerCamelCase : Optional[int] = val # read in qv biases read_in_q_v_bias(lowercase__ , lowercase__ ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(lowercase__ , strict=lowercase__ ) _lowerCamelCase : Any = load_demo_image() _lowerCamelCase : str = 'What is unusual about this image?' 
# create processor _lowerCamelCase : List[str] = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=lowercase__ , image_std=lowercase__ ) _lowerCamelCase : List[Any] = InstructBlipProcessor( image_processor=lowercase__ , tokenizer=lowercase__ , qformer_tokenizer=lowercase__ , ) _lowerCamelCase : List[str] = processor(images=lowercase__ , text=lowercase__ , return_tensors='pt' ).to(lowercase__ ) # make sure processor creates exact same pixel values _lowerCamelCase : List[Any] = vis_processors['eval'](lowercase__ ).unsqueeze(0 ).to(lowercase__ ) _lowerCamelCase : Any = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowercase__ ) original_model.to(lowercase__ ) hf_model.to(lowercase__ ) with torch.no_grad(): if "vicuna" in model_name: _lowerCamelCase : List[Any] = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits _lowerCamelCase : List[str] = hf_model(**lowercase__ ).logits else: _lowerCamelCase : Dict = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits _lowerCamelCase : Tuple = tokenizer('\n' , return_tensors='pt' ).input_ids.to(lowercase__ ) _lowerCamelCase : List[str] = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) _lowerCamelCase : Dict = hf_model(**lowercase__ , labels=lowercase__ ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape _lowerCamelCase : str = 1E-4 if 'vicuna' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , lowercase__ , atol=lowercase__ ) print('Looks ok!' ) print('Generating with original model...' 
) _lowerCamelCase : str = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' ) _lowerCamelCase : Optional[int] = hf_model.generate( **lowercase__ , do_sample=lowercase__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? _lowerCamelCase : Tuple = 2 print('Original generation:' , lowercase__ ) _lowerCamelCase : Tuple = processor.batch_decode(lowercase__ , skip_special_tokens=lowercase__ ) _lowerCamelCase : List[Any] = [text.strip() for text in output_text] print('HF generation:' , lowercase__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(lowercase__ ) hf_model.save_pretrained(lowercase__ ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() lowercase__ = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) lowercase__ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
360
"""Lazy import structure for the MEGA model (transformers-style __init__)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Map of submodule name -> public symbols; consumed lazily by _LazyModule.
# Fix: the original bound this dict to `lowercase__` but later referenced
# `_import_structure`, which raised NameError at import time.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: simply skip registering the modeling symbols.
    pass
else:
    # Fix: the original re-assigned the whole `lowercase__` name here,
    # clobbering the import map instead of extending it.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime uses the lazy proxy.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with the lazy proxy so that
    # submodules are only imported on first attribute access.
    # Fix: the original assigned the proxy to a throwaway name, which
    # does not install the lazy behaviour.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
12
0
"""Recursive insertion sort on a mutable list."""
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first ``n`` elements of ``collection`` in place, recursively.

    Each level orders element ``n - 1`` relative to its neighbours via
    ``insert_next`` and then recurses on the remaining prefix.

    Fixes vs. the mangled original: both functions were named ``_snake_case``
    (the second shadowed the first) with duplicate ``lowercase__`` parameters
    (a SyntaxError), and the ``__main__`` block called the then-undefined
    ``rec_insertion_sort``.

    :param collection: mutable list of mutually comparable items (modified in place)
    :param n: number of leading elements to sort
    """
    # Base case: an empty/single-element list or prefix is already sorted.
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble ``collection[index]`` forward while adjacent pairs are out of order."""
    # Stop at the end of the list, or once the adjacent pair is in order.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swap the out-of-order neighbours, then continue with the next pair.
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
361
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): if attention_mask is None: _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = OPTConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : Tuple = pad_token_id _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : 
Optional[int] = embed_dim _lowerCamelCase : List[str] = word_embed_proj_dim _lowerCamelCase : Any = False def A_ ( self ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) _lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase ) _lowerCamelCase : Optional[Any] = inputs_dict['input_ids'] _lowerCamelCase : str = input_ids[:1, :] _lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) _lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : Optional[int] = tf.concat([attention_mask, 
next_attn_mask] , axis=-1 ) _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 10 def A_ ( self ): _lowerCamelCase : int = TFOPTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _lowerCamelCase : Optional[int] = model_class(config=lowercase ) _lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) _lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. _lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing _lowerCamelCase : int = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Optional[Any] = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) _lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Union[str, Any] = False self.assertTrue(lowercase ) def _snake_case ( lowercase__ ): return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = 99 def A_ ( self ): _lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _lowerCamelCase 
: int = input_ids.shape[0] _lowerCamelCase : List[Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' ) _lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): _lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state _lowerCamelCase : Optional[Any] = (1, 11, 512) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[str] = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) _lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): super().setUp() _lowerCamelCase : List[Any] = 'facebook/opt-350m' def A_ ( self ): _lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) _lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) _lowerCamelCase : List[str] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _lowerCamelCase : List[Any] = tokenizer(lowercase , 
return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) _lowerCamelCase : Any = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) _lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self ): _lowerCamelCase : str = 'facebook/opt-125m' _lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : int = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string 
self.assertListEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = 'facebook/opt-350m' _lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase ) _lowerCamelCase : Any = 'left' # use different length sentences to test batching _lowerCamelCase : Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] _lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) _lowerCamelCase : int = inputs['input_ids'] _lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase ) _lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) _lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) _lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def A_ ( self ): _lowerCamelCase : Tuple = 'facebook/opt-350m' _lowerCamelCase : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and 
mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
12
0
"""torch.hub entry points for transformers Auto classes (hubconf.py style)."""
import os
import sys

# Make the repository's `src` layout importable when loaded via torch.hub.
# Fix: the original assigned the path to `lowercase__` but appended the
# then-undefined `SRC_DIR`, raising NameError at import.
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Packages torch.hub must install before these entry points can run.
# torch.hub requires this exact module-level name (`dependencies`).
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]

# NOTE(review): in the mangled original all seven wrappers were named
# `_snake_case` with `def f(*lowercase__, **lowercase__)` — a SyntaxError
# (duplicate argument name) — and shadowed each other.  The entry-point
# names below follow transformers' hubconf.py convention; confirm against
# the unmangled source.


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    # Thin passthrough to the AutoConfig factory.
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    # Thin passthrough to the AutoTokenizer factory.
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    # Thin passthrough to the AutoModel factory.
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    # Thin passthrough to the causal-LM Auto factory.
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    # Thin passthrough to the masked-LM Auto factory.
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    # Thin passthrough to the sequence-classification Auto factory.
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    # Thin passthrough to the question-answering Auto factory.
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
362
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """philschmid/bart-large-cnn-samsum""" lowerCamelCase__ = ( """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """ """and returns a summary of the text.""" ) lowerCamelCase__ = """summarizer""" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = ["""text"""] lowerCamelCase__ = ["""text"""] def A_ ( self , lowercase ): return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase ) def A_ ( self , lowercase ): return self.model.generate(**lowercase )[0] def A_ ( self , lowercase ): return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase )
12
0
"""simple docstring""" from typing import Any def _snake_case ( lowercase__ ): if not input_list: return [] _lowerCamelCase : Any = [input_list.count(lowercase__ ) for value in input_list] _lowerCamelCase : Dict = max(lowercase__ ) # Gets the maximum count in the input list. # Gets values of modes return sorted({input_list[i] for i, value in enumerate(lowercase__ ) if value == y} ) if __name__ == "__main__": import doctest doctest.testmod()
363
"""simple docstring""" from __future__ import annotations def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) ) _lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )] index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ ) _lowerCamelCase : float = 0 _lowerCamelCase : list[float] = [0] * len(lowercase__ ) for i in index: if weight[i] <= capacity: _lowerCamelCase : int = 1 max_value += value[i] capacity -= weight[i] else: _lowerCamelCase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" from bisect import bisect from itertools import accumulate def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Any = sorted(zip(lowercase__ , lowercase__ ) , key=lambda lowercase__ : x[0] / x[1] , reverse=lowercase__ ) _lowerCamelCase : Any = [i[0] for i in r], [i[1] for i in r] _lowerCamelCase : List[str] = list(accumulate(lowercase__ ) ) _lowerCamelCase : List[Any] = bisect(lowercase__ , lowercase__ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
364
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowercase__ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) lowercase__ = [] lowercase__ = [] lowercase__ = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowercase__ = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", """emoji""": True, }, } ] lowercase__ = 0 for log in Path().glob("""*.log"""): lowercase__ = 0 with open(log, """r""") as f: for line in f: lowercase__ = json.loads(line) if line.get("""nodeid""", """""") != "": lowercase__ = line["""nodeid"""] if line.get("""duration""", None) is not None: lowercase__ = F"{line['duration']:.4f}" if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowercase__ = [] log.unlink() lowercase__ = """""" lowercase__ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowercase__ = [] lowercase__ = {} for test in failed_tests: lowercase__ = test[0].split("""::""") lowercase__ = data[0].split("""/""")[-1] if data[0] not in filesafailed: lowercase__ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowercase__ = [test[0] for test in failed_table] lowercase__ = list(set(files)) # Count number of instances in failed_tests lowercase__ = [] for file in 
individual_files: table.append([file, len(filesafailed[file])]) lowercase__ = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: lowercase__ = """Too many failed tests, please see the full report in the Action results.""" lowercase__ = len(err) + 10 lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}" print(F"### {message}") else: lowercase__ = """No failed tests! 🤗""" print(F"## {message}") payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient lowercase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 🤗": lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowercase__ = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) lowercase__ = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) lowercase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) lowercase__ = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowercase__ = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowercase__ = row[0] else: lowercase__ = """""" lowercase__ = { 
"""type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
12
0
"""simple docstring"""
# Public feature types re-exported by this package (datasets-style __init__).
# NOTE(review): several names here look mangled by an automated rename --
# the list is bound to `lowercase__` rather than `__all__`, and the import
# below repeats `ArrayaD` four times where Array2D..Array5D are expected
# (later duplicates shadow earlier ones) -- confirm against the original
# `datasets.features` __init__ before relying on the export surface.
lowercase__ = [
    """Audio""",
    """Array2D""",
    """Array3D""",
    """Array4D""",
    """Array5D""",
    """ClassLabel""",
    """Features""",
    """Sequence""",
    """Value""",
    """Image""",
    """Translation""",
    """TranslationVariableLanguages""",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
365
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = """AutoTokenizer""" lowerCamelCase__ = ["""tokenizer"""] lowerCamelCase__ = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__( self , lowercase , lowercase=None ): super().__init__(lowercase ) _lowerCamelCase : Optional[int] = speaker_embeddings @classmethod def A_ ( cls , lowercase , lowercase="speaker_embeddings_path.json" , **lowercase ): if speaker_embeddings_dict_path is not None: _lowerCamelCase : Optional[Any] = get_file_from_repo( lowercase , lowercase , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if speaker_embeddings_path is None: logger.warning( F'''`{os.path.join(lowercase , lowercase )}` does not exists , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' ) _lowerCamelCase : List[Any] = None else: with open(lowercase ) as speaker_embeddings_json: _lowerCamelCase : Union[str, Any] = json.load(lowercase ) else: _lowerCamelCase : Tuple = None _lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase , **lowercase ) return cls(tokenizer=lowercase , speaker_embeddings=lowercase ) def A_ 
( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase ) _lowerCamelCase : int = {} _lowerCamelCase : List[Any] = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase ) _lowerCamelCase : Any = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , ) _lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' ) _lowerCamelCase : Optional[Any] = tmp_dict with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp: json.dump(lowercase , lowercase ) super().save_pretrained(lowercase , lowercase , **lowercase ) def A_ ( self , lowercase = None , **lowercase ): _lowerCamelCase : Tuple = self.speaker_embeddings[voice_preset] _lowerCamelCase : Any = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' ) _lowerCamelCase : Union[str, Any] = get_file_from_repo( self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowercase ) , cache_dir=kwargs.pop('cache_dir' , lowercase ) , force_download=kwargs.pop('force_download' , lowercase ) , proxies=kwargs.pop('proxies' , lowercase ) , resume_download=kwargs.pop('resume_download' , lowercase ) , local_files_only=kwargs.pop('local_files_only' , lowercase ) , use_auth_token=kwargs.pop('use_auth_token' , lowercase ) , revision=kwargs.pop('revision' , lowercase ) , ) if path is None: raise ValueError( 
F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.''' ) _lowerCamelCase : List[str] = np.load(lowercase ) return voice_preset_dict def A_ ( self , lowercase = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' ) def __call__( self , lowercase=None , lowercase=None , lowercase="pt" , lowercase=256 , lowercase=False , lowercase=True , lowercase=False , **lowercase , ): if voice_preset is not None and not isinstance(lowercase , lowercase ): if ( isinstance(lowercase , lowercase ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _lowerCamelCase : Any = self._load_voice_preset(lowercase ) else: if isinstance(lowercase , lowercase ) and not voice_preset.endswith('.npz' ): _lowerCamelCase : Optional[Any] = voice_preset + '.npz' _lowerCamelCase : Union[str, Any] = np.load(lowercase ) if voice_preset is not None: self._validate_voice_preset_dict(lowercase , **lowercase ) _lowerCamelCase : Tuple = BatchFeature(data=lowercase , tensor_type=lowercase ) _lowerCamelCase : Any = self.tokenizer( lowercase , return_tensors=lowercase , padding='max_length' , max_length=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , add_special_tokens=lowercase , **lowercase , ) if voice_preset is not None: _lowerCamelCase : Optional[int] = voice_preset return encoded_text
12
0
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: lowercase__ = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=None , lowercase=True , lowercase=True , lowercase=None , ): _lowerCamelCase : List[str] = size if size is not None else {'height': 20, 'width': 20} _lowerCamelCase : Dict = parent _lowerCamelCase : Union[str, Any] = batch_size _lowerCamelCase : Optional[Any] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : Tuple = min_resolution _lowerCamelCase : Optional[Any] = max_resolution _lowerCamelCase : Union[str, Any] = size _lowerCamelCase : Dict = do_normalize _lowerCamelCase : Dict = do_convert_rgb _lowerCamelCase : Union[str, Any] = [512, 1024, 2048, 4096] _lowerCamelCase : Optional[int] = patch_size if patch_size is not None else {'height': 16, 'width': 16} def A_ ( self ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def A_ ( self ): _lowerCamelCase : str = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg' _lowerCamelCase : Optional[Any] = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert('RGB' ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""", ) @require_torch @require_vision class lowerCAmelCase__ ( 
lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = PixaStructImageProcessor if is_vision_available() else None def A_ ( self ): _lowerCamelCase : Optional[Any] = PixaStructImageProcessingTester(self ) @property def A_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def A_ ( self ): _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , 'do_normalize' ) ) self.assertTrue(hasattr(lowercase , 'do_convert_rgb' ) ) def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.image_processor_tester.prepare_dummy_image() _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) _lowerCamelCase : List[Any] = 2048 _lowerCamelCase : str = image_processor(lowercase , return_tensors='pt' , max_patches=lowercase ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) ) def A_ ( self ): # Initialize image_processor _lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input _lowerCamelCase : int = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCamelCase : Optional[int] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCamelCase : Union[str, Any] = image_processor( lowercase , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , 
(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def A_ ( self ): # Initialize image_processor _lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input _lowerCamelCase : Optional[int] = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 _lowerCamelCase : List[Any] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowercase ): _lowerCamelCase : Union[str, Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=lowercase ).flattened_patches _lowerCamelCase : Optional[int] = 'Hello' _lowerCamelCase : int = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=lowercase , header_text=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCamelCase : List[Any] = image_processor( lowercase , return_tensors='pt' , max_patches=lowercase , header_text=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def A_ ( self ): # Initialize image_processor _lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , np.ndarray ) _lowerCamelCase : Tuple = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels 
) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCamelCase : Dict = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCamelCase : Dict = image_processor( lowercase , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def A_ ( self ): # Initialize image_processor _lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , torch.Tensor ) # Test not batched input _lowerCamelCase : Tuple = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCamelCase : Optional[Any] = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCamelCase : Dict = image_processor( lowercase , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11, reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""", ) @require_torch @require_vision class lowerCAmelCase__ ( lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = PixaStructImageProcessor if is_vision_available() else None def A_ ( 
self ): _lowerCamelCase : str = PixaStructImageProcessingTester(self , num_channels=4 ) _lowerCamelCase : Union[str, Any] = 3 @property def A_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def A_ ( self ): _lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase , 'do_normalize' ) ) self.assertTrue(hasattr(lowercase , 'do_convert_rgb' ) ) def A_ ( self ): # Initialize image_processor _lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase ) for image in image_inputs: self.assertIsInstance(lowercase , Image.Image ) # Test not batched input _lowerCamelCase : Dict = ( (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width']) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input _lowerCamelCase : int = image_processor( image_inputs[0] , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched _lowerCamelCase : int = image_processor( lowercase , return_tensors='pt' , max_patches=lowercase ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
366
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device lowercase__ = False class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCamelCase : Dict = torch.manual_seed(0 ) _lowerCamelCase : Dict = pipe( image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images _lowerCamelCase : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCamelCase : Any = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
12
0
"""simple docstring""" import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def _snake_case ( lowercase__ , lowercase__=False ): _lowerCamelCase : List[Any] = OmegaConf.load(lowercase__ ) if display: print(yaml.dump(OmegaConf.to_container(lowercase__ ) ) ) return config def _snake_case ( lowercase__ , lowercase__=None , lowercase__=None ): if conf_path is None: _lowerCamelCase : List[Any] = './model_checkpoints/vqgan_only.yaml' _lowerCamelCase : int = load_config(lowercase__ , display=lowercase__ ) _lowerCamelCase : List[Any] = VQModel(**config.model.params ) if ckpt_path is None: _lowerCamelCase : List[str] = './model_checkpoints/vqgan_only.pt' _lowerCamelCase : str = torch.load(lowercase__ , map_location=lowercase__ ) if ".ckpt" in ckpt_path: _lowerCamelCase : Optional[Any] = sd['state_dict'] model.load_state_dict(lowercase__ , strict=lowercase__ ) model.to(lowercase__ ) del sd return model def _snake_case ( lowercase__ , lowercase__ ): _lowerCamelCase : str = model.encode(lowercase__ ) print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' ) _lowerCamelCase : int = model.decode(lowercase__ ) return xrec def _snake_case ( lowercase__ , lowercase__=False ): _lowerCamelCase : Optional[int] = string.rsplit('.' , 1 ) if reload: _lowerCamelCase : str = importlib.import_module(lowercase__ ) importlib.reload(lowercase__ ) return getattr(importlib.import_module(lowercase__ , package=lowercase__ ) , cls ) def _snake_case ( lowercase__ ): if "target" not in config: raise KeyError('Expected key `target` to instantiate.' 
) return get_obj_from_str(config['target'] )(**config.get('params' , {} ) ) def _snake_case ( lowercase__ , lowercase__ , lowercase__=True , lowercase__=True ): _lowerCamelCase : Optional[int] = instantiate_from_config(lowercase__ ) if sd is not None: model.load_state_dict(lowercase__ ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): # load the specified checkpoint if ckpt: _lowerCamelCase : str = torch.load(lowercase__ , map_location='cpu' ) _lowerCamelCase : int = pl_sd['global_step'] print(f'''loaded model from global step {global_step}.''' ) else: _lowerCamelCase : Any = {'state_dict': None} _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : Union[str, Any] = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=lowercase__ , eval_mode=lowercase__ )['model'] return model, global_step
367
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency lowercase__ = { """E""": 12.70, """T""": 9.06, """A""": 8.17, """O""": 7.51, """I""": 6.97, """N""": 6.75, """S""": 6.33, """H""": 6.09, """R""": 5.99, """D""": 4.25, """L""": 4.03, """C""": 2.78, """U""": 2.76, """M""": 2.41, """W""": 2.36, """F""": 2.23, """G""": 2.02, """Y""": 1.97, """P""": 1.93, """B""": 1.29, """V""": 0.98, """K""": 0.77, """J""": 0.15, """X""": 0.15, """Q""": 0.10, """Z""": 0.07, } lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ""" lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" def _snake_case ( lowercase__ ): _lowerCamelCase : Tuple = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def _snake_case ( lowercase__ ): return x[0] def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = get_letter_count(lowercase__ ) _lowerCamelCase : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(lowercase__ ) _lowerCamelCase : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase__ ) _lowerCamelCase : Optional[int] = ''.join(freq_to_letter[freq] ) _lowerCamelCase : Any = list(freq_to_letter_str.items() ) freq_pairs.sort(key=lowercase__ , reverse=lowercase__ ) _lowerCamelCase : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(lowercase__ ) def _snake_case ( lowercase__ ): _lowerCamelCase : str = get_frequency_order(lowercase__ ) _lowerCamelCase : Union[str, Any] = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
12
0
"""simple docstring""" import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowercase__ ) _lowerCamelCase : Tuple = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: _lowerCamelCase : Optional[int] = dataset_size < in_memory_max_size else: _lowerCamelCase : Tuple = False _lowerCamelCase : int = is_small_dataset(lowercase__ ) assert result == expected
368
"""simple docstring""" import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase , lowercase ): _lowerCamelCase : Dict = question_encoder _lowerCamelCase : List[Any] = generator _lowerCamelCase : Optional[Any] = self.question_encoder def A_ ( self , lowercase ): if os.path.isfile(lowercase ): raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(lowercase , exist_ok=lowercase ) _lowerCamelCase : List[Any] = os.path.join(lowercase , 'question_encoder_tokenizer' ) _lowerCamelCase : Dict = os.path.join(lowercase , 'generator_tokenizer' ) self.question_encoder.save_pretrained(lowercase ) self.generator.save_pretrained(lowercase ) @classmethod def A_ ( cls , lowercase , **lowercase ): # dynamically import AutoTokenizer from ..auto.tokenization_auto import AutoTokenizer _lowerCamelCase : Optional[int] = kwargs.pop('config' , lowercase ) if config is None: _lowerCamelCase : int = RagConfig.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( lowercase , config=config.question_encoder , subfolder='question_encoder_tokenizer' ) _lowerCamelCase : Dict = AutoTokenizer.from_pretrained( lowercase , config=config.generator , subfolder='generator_tokenizer' ) return cls(question_encoder=lowercase , generator=lowercase ) def __call__( self , *lowercase , **lowercase ): return self.current_tokenizer(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.generator.decode(*lowercase , **lowercase ) def A_ ( self ): _lowerCamelCase : Any = self.question_encoder def A_ ( self ): _lowerCamelCase : Optional[Any] = 
self.generator def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = "longest" , lowercase = None , lowercase = True , **lowercase , ): warnings.warn( '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ' 'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ' 'context manager to prepare your targets. See the documentation of your specific tokenizer for more ' 'details' , lowercase , ) if max_length is None: _lowerCamelCase : Optional[Any] = self.current_tokenizer.model_max_length _lowerCamelCase : Optional[Any] = self( lowercase , add_special_tokens=lowercase , return_tensors=lowercase , max_length=lowercase , padding=lowercase , truncation=lowercase , **lowercase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _lowerCamelCase : int = self.current_tokenizer.model_max_length _lowerCamelCase : str = self( text_target=lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , **lowercase , ) _lowerCamelCase : int = labels['input_ids'] return model_inputs
12
0
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""image_processor""", """tokenizer"""] lowerCamelCase__ = """BlipImageProcessor""" lowerCamelCase__ = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self , lowercase , lowercase ): _lowerCamelCase : Tuple = False super().__init__(lowercase , lowercase ) _lowerCamelCase : Tuple = self.image_processor def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ): if images is None and text is None: raise ValueError('You have to specify either images or text.' 
) # Get only text if images is None: _lowerCamelCase : List[Any] = self.tokenizer _lowerCamelCase : List[str] = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) return text_encoding # add pixel_values _lowerCamelCase : Optional[int] = self.image_processor(lowercase , return_tensors=lowercase ) if text is not None: _lowerCamelCase : Optional[int] = self.tokenizer( text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , ) else: _lowerCamelCase : str = None if text_encoding is not None: encoding_image_processor.update(lowercase ) return encoding_image_processor def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.batch_decode(*lowercase , **lowercase ) def A_ ( self , *lowercase , **lowercase ): return self.tokenizer.decode(*lowercase , **lowercase ) @property def A_ ( self ): _lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names _lowerCamelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
369
"""simple docstring""" def _snake_case ( lowercase__ = 10 ): if not isinstance(lowercase__ , lowercase__ ) or n < 0: raise ValueError('Invalid input' ) _lowerCamelCase : str = 10**n _lowerCamelCase : Union[str, Any] = 28433 * (pow(2 , 7830457 , lowercase__ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"{solution(10) = }")
12
0
"""simple docstring""" def _snake_case ( lowercase__ , lowercase__ ): if density <= 0: raise ValueError('Impossible fluid density' ) if bulk_modulus <= 0: raise ValueError('Impossible bulk modulus' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
370
"""simple docstring""" import argparse import datetime def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } _lowerCamelCase : str = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowercase__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month _lowerCamelCase : int = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) _lowerCamelCase : str = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day _lowerCamelCase : int = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator _lowerCamelCase : str = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year _lowerCamelCase : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation _lowerCamelCase : str = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) ) # Start math if m <= 2: _lowerCamelCase : str = y - 1 _lowerCamelCase : Tuple = m + 12 # maths var _lowerCamelCase : int = int(str(lowercase__ )[:2] ) _lowerCamelCase : int = int(str(lowercase__ )[2:] ) _lowerCamelCase : int = int(2.6 * m - 5.3_9 ) _lowerCamelCase : int = int(c / 4 ) _lowerCamelCase : int = int(k / 4 ) _lowerCamelCase : int = int(d + k ) _lowerCamelCase : int = int(t + u + v + x ) _lowerCamelCase : int = int(z - (2 * c) ) _lowerCamelCase : int = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response _lowerCamelCase : str = f'''Your date {date_input}, is a {days[str(lowercase__ )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() lowercase__ = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) lowercase__ = parser.parse_args() zeller(args.date_input)
12
0
# Tests for AutoFeatureExtractor: loading from the hub, loading from local
# configs, error-message checks, trust_remote_code handling, and registration
# of custom feature extractors.
#
# NOTE(review): this module has been machine-obfuscated. Many distinct local
# names were collapsed to ``lowercase`` / annotated ``_lowerCamelCase``
# targets, and all test methods were renamed to ``A_`` (so later definitions
# shadow earlier ones). Several bare ``lowercase`` references below are
# therefore unresolved at runtime; comments describe the apparent intent —
# confirm against the upstream transformers test suite before relying on them.
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

# Make the shared ``test_module`` utilities importable.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

# NOTE(review): these three fixture paths were presumably bound to distinct
# names originally; here they rebind the same name, so only the last survives.
lowercase__ = get_tests_dir("""fixtures""")
lowercase__ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
lowercase__ = get_tests_dir("""fixtures/dummy-config.json""")


class lowerCAmelCase__ ( unittest.TestCase ):
    """Unit tests for the AutoFeatureExtractor factory."""

    def A_ ( self ):
        # Apparent setUp-style initialisation; the value is never used.
        _lowerCamelCase : str = 0

    def A_ ( self ):
        # Load a feature extractor directly from a hub model id.
        _lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        # NOTE(review): both arguments are the unresolved name ``lowercase``;
        # presumably (extractor, WavaVecaFeatureExtractor) originally.
        self.assertIsInstance(lowercase , lowercase )

    def A_ ( self ):
        # Load from a local fixture path (``lowercase`` is unresolved here).
        _lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )

    def A_ ( self ):
        # Round-trip: a bare config.json (without feature_extractor_type)
        # must be enough to load the feature processor locally.
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCamelCase : int = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is
            # enough to load feature processor locally
            _lowerCamelCase : str = AutoFeatureExtractor.from_pretrained(lowercase ).to_dict()
            config_dict.pop('feature_extractor_type' )
            _lowerCamelCase : List[str] = WavaVecaFeatureExtractor(**lowercase )
            # save in new folder
            model_config.save_pretrained(lowercase )
            config.save_pretrained(lowercase )
            _lowerCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(lowercase )
            # make sure private variable is not incorrectly saved
            _lowerCamelCase : Union[str, Any] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(lowercase , lowercase )

    def A_ ( self ):
        # Load from a standalone preprocessor_config.json fixture.
        _lowerCamelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase )
        self.assertIsInstance(lowercase , lowercase )

    def A_ ( self ):
        # An invalid model identifier must raise with a helpful message.
        with self.assertRaisesRegex(
            lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
            _lowerCamelCase : int = AutoFeatureExtractor.from_pretrained('bert-base' )

    def A_ ( self ):
        # An invalid revision must raise with a helpful message.
        with self.assertRaisesRegex(
            lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            _lowerCamelCase : Dict = AutoFeatureExtractor.from_pretrained(lowercase , revision='aaaaaa' )

    def A_ ( self ):
        # A repo without a preprocessor_config.json must raise accordingly.
        with self.assertRaisesRegex(
            lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            _lowerCamelCase : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )

    def A_ ( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(lowercase ):
            _lowerCamelCase : Tuple = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowercase ):
            _lowerCamelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase )
        _lowerCamelCase : int = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(lowercase )
            _lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained(lowercase , trust_remote_code=lowercase )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )

    def A_ ( self ):
        # Registering a custom config/extractor pair makes it usable through
        # the auto-API; the finally-block undoes the global registration.
        try:
            AutoConfig.register('custom' , lowercase )
            AutoFeatureExtractor.register(lowercase , lowercase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowercase ):
                AutoFeatureExtractor.register(lowercase , lowercase )
            # Now that the config is registered, it can be used as any other config with the auto-API
            _lowerCamelCase : List[Any] = CustomFeatureExtractor.from_pretrained(lowercase )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(lowercase )
                _lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(lowercase )
                self.assertIsInstance(lowercase , lowercase )
        finally:
            # Clean up the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def A_ ( self ):
        # Conflict resolution between a locally registered extractor and a
        # same-named one on the Hub, depending on trust_remote_code.
        class lowerCAmelCase__ ( lowercase ):
            """Local stand-in extractor that marks itself as local."""

            # Flag checked below to tell the local class from the Hub one.
            lowerCamelCase__ = True

        try:
            AutoConfig.register('custom' , lowercase )
            AutoFeatureExtractor.register(lowercase , lowercase )
            # If remote code is not set, the default is to use local
            _lowerCamelCase : Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            _lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            _lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=lowercase )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(lowercase , 'is_local' ) )
        finally:
            # Clean up the global registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
371
"""simple docstring""" import re def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[int] = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$' ) if match := re.search(lowercase__ , lowercase__ ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator("""+918827897895"""))
12
0
"""simple docstring""" def _snake_case ( lowercase__ ): _lowerCamelCase : Dict = set() # edges = list of graph's edges _lowerCamelCase : Optional[Any] = get_edges(lowercase__ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: _lowerCamelCase : str = edges.pop() chosen_vertices.add(lowercase__ ) chosen_vertices.add(lowercase__ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(lowercase__ ) return chosen_vertices def _snake_case ( lowercase__ ): _lowerCamelCase : str = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
350
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path lowercase__ = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} lowercase__ = """zero2""" lowercase__ = """zero3""" lowercase__ = [ZEROa, ZEROa] def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param _lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test lowercase__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( lowercase ): '''simple docstring''' @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu 
@parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) @require_torch_multi_gpu @parameterized.expand(lowercase , name_func=lowercase ) def A_ ( self , lowercase , lowercase ): self.run_and_check( stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , ) def A_ ( self , lowercase ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = models[model] _lowerCamelCase : Optional[int] = self.run_trainer( stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , ) self.do_checks(lowercase ) return output_dir def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ): _lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase ) _lowerCamelCase : Any = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['--fp16'] ) # currently 
ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _lowerCamelCase : Dict = self.get_launcher(lowercase ) _lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase , env=self.get_env() ) return output_dir def A_ ( self , lowercase=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
12
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , *lowercase , **lowercase ): warnings.warn( 'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use OwlViTImageProcessor instead.' , lowercase , ) super().__init__(*lowercase , **lowercase )
351
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowercase__ = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase ): '''simple docstring''' lowerCamelCase__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ): super().__init__(**lowercase ) _lowerCamelCase : Optional[Any] = do_rescale _lowerCamelCase : Union[str, Any] = rescale_factor _lowerCamelCase : Any = do_pad _lowerCamelCase : Optional[int] = pad_size def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ): return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase ) def A_ ( self , lowercase , lowercase , lowercase = None ): _lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase ) _lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height _lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase ) def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ): _lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale _lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad _lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size _lowerCamelCase : Dict = make_list_of_images(lowercase ) if not valid_images(lowercase ): raise 
ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. _lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images] if do_rescale: _lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images] if do_pad: _lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images] _lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images] _lowerCamelCase : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase )
12
0